/*
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/aotLinkedClassBulkLoader.hpp"
#include "cds/cdsConfig.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
#include "code/dependencyContext.hpp"
#include "code/SCCache.hpp"
#include "compiler/compilationLog.hpp"
#include "compiler/compilationMemoryStatistic.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerEvent.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/directivesParser.hpp"
#include "compiler/recompilationPolicy.hpp"
#include "gc/shared/memAllocator.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvm.h"
#include "jfr/jfrEvents.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/whitebox.hpp"
#include "runtime/atomic.hpp"
#include "runtime/escapeBarrier.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/perfData.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe.inline.hpp"
#include "services/management.hpp"
#include "utilities/debug.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
#include "utilities/nonblockingQueue.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmciEnv.hpp"
#include "jvmci/jvmciRuntime.hpp"
#endif

#ifdef DTRACE_ENABLED

// Only bother with this argument setup if dtrace is available

#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)            \
  {                                                                     \
    Symbol* klass_name = (method)->klass_name();                        \
    Symbol* name = (method)->name();                                    \
    Symbol* signature = (method)->signature();                          \
    HOTSPOT_METHOD_COMPILE_BEGIN(                                       \
      (char *) comp_name, strlen(comp_name),                            \
      (char *) klass_name->bytes(), klass_name->utf8_length(),          \
      (char *) name->bytes(), name->utf8_length(),                      \
      (char *) signature->bytes(), signature->utf8_length());           \
  }

#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)     \
  {                                                                     \
    Symbol* klass_name = (method)->klass_name();                        \
    Symbol* name = (method)->name();                                    \
    Symbol* signature = (method)->signature();                          \
    HOTSPOT_METHOD_COMPILE_END(                                         \
      (char *) comp_name, strlen(comp_name),                            \
      (char *) klass_name->bytes(), klass_name->utf8_length(),          \
      (char *) name->bytes(), name->utf8_length(),                      \
      (char *) signature->bytes(), signature->utf8_length(), (success)); \
  }

#else // ndef DTRACE_ENABLED

#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)
#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)

#endif // ndef DTRACE_ENABLED

bool CompileBroker::_initialized = false;
bool CompileBroker::_replay_initialized = false;
volatile bool CompileBroker::_should_block = false;
volatile int CompileBroker::_print_compilation_warning = 0;
volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;

// The installed compiler(s)
AbstractCompiler* CompileBroker::_compilers[3];

// The maximum numbers of compiler threads to be determined during startup.
int CompileBroker::_c1_count = 0;
int CompileBroker::_c2_count = 0;
int CompileBroker::_c3_count = 0;
int CompileBroker::_sc_count = 0;

// An array of compiler names as Java String objects
jobject* CompileBroker::_compiler1_objects = nullptr;
jobject* CompileBroker::_compiler2_objects = nullptr;
jobject* CompileBroker::_compiler3_objects = nullptr;
jobject* CompileBroker::_sc_objects = nullptr;

CompileLog** CompileBroker::_compiler1_logs = nullptr;
CompileLog** CompileBroker::_compiler2_logs = nullptr;
CompileLog** CompileBroker::_compiler3_logs = nullptr;
CompileLog** CompileBroker::_sc_logs = nullptr;

// These counters are used to assign a unique ID to each compilation.
volatile jint CompileBroker::_compilation_id = 0;
volatile jint CompileBroker::_osr_compilation_id = 0;
volatile jint CompileBroker::_native_compilation_id = 0;

// Performance counters
PerfCounter* CompileBroker::_perf_total_compilation = nullptr;
PerfCounter* CompileBroker::_perf_osr_compilation = nullptr;
PerfCounter* CompileBroker::_perf_standard_compilation = nullptr;

PerfCounter* CompileBroker::_perf_total_bailout_count = nullptr;
PerfCounter* CompileBroker::_perf_total_invalidated_count = nullptr;
PerfCounter* CompileBroker::_perf_total_compile_count = nullptr;
PerfCounter* CompileBroker::_perf_total_osr_compile_count = nullptr;
PerfCounter* CompileBroker::_perf_total_standard_compile_count = nullptr;

PerfCounter* CompileBroker::_perf_sum_osr_bytes_compiled = nullptr;
PerfCounter* CompileBroker::_perf_sum_standard_bytes_compiled = nullptr;
PerfCounter* CompileBroker::_perf_sum_nmethod_size = nullptr;
PerfCounter* CompileBroker::_perf_sum_nmethod_code_size = nullptr;

PerfStringVariable* CompileBroker::_perf_last_method = nullptr;
PerfStringVariable* CompileBroker::_perf_last_failed_method = nullptr;
PerfStringVariable* CompileBroker::_perf_last_invalidated_method = nullptr;
PerfVariable* CompileBroker::_perf_last_compile_type = nullptr;
PerfVariable* CompileBroker::_perf_last_compile_size = nullptr;
PerfVariable* CompileBroker::_perf_last_failed_type = nullptr;
PerfVariable* CompileBroker::_perf_last_invalidated_type = nullptr;

// Timers and counters for generating statistics
elapsedTimer CompileBroker::_t_total_compilation;
elapsedTimer CompileBroker::_t_osr_compilation;
elapsedTimer CompileBroker::_t_standard_compilation;
elapsedTimer CompileBroker::_t_invalidated_compilation;
elapsedTimer CompileBroker::_t_bailedout_compilation;

uint CompileBroker::_total_bailout_count = 0;
uint CompileBroker::_total_invalidated_count = 0;
uint CompileBroker::_total_not_entrant_count = 0;
uint CompileBroker::_total_compile_count = 0;
uint CompileBroker::_total_osr_compile_count = 0;
uint CompileBroker::_total_standard_compile_count = 0;
uint CompileBroker::_total_compiler_stopped_count = 0;
uint CompileBroker::_total_compiler_restarted_count = 0;

uint CompileBroker::_sum_osr_bytes_compiled = 0;
uint CompileBroker::_sum_standard_bytes_compiled = 0;
uint CompileBroker::_sum_nmethod_size = 0;
uint CompileBroker::_sum_nmethod_code_size = 0;

jlong CompileBroker::_peak_compilation_time = 0;

CompilerStatistics CompileBroker::_stats_per_level[CompLevel_full_optimization];
CompilerStatistics CompileBroker::_scc_stats;
CompilerStatistics CompileBroker::_scc_stats_per_level[CompLevel_full_optimization + 1];

CompileQueue* CompileBroker::_c3_compile_queue = nullptr;
CompileQueue* CompileBroker::_c2_compile_queue = nullptr;
CompileQueue* CompileBroker::_c1_compile_queue = nullptr;
CompileQueue* CompileBroker::_sc1_compile_queue = nullptr;
CompileQueue* CompileBroker::_sc2_compile_queue = nullptr;

bool compileBroker_init() {
  if (LogEvents) {
    CompilationLog::init();
  }

  // init directives stack, adding default directive
  DirectivesStack::init();

  if (DirectivesParser::has_file()) {
    return DirectivesParser::parse_from_flag();
  } else if (CompilerDirectivesPrint) {
    // Print default directive even when no other was added
    DirectivesStack::print(tty);
  }

  return true;
}
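// CompileTaskWrapper is a stack-allocated helper used by compiler threads:
// its constructor attaches the task to the current CompilerThread and logs
// the task start; its destructor logs completion, detaches the task, and
// either notifies the waiting thread (blocking compiles) or frees the task
// (non-blocking compiles), as implemented below.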
CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
  CompilerThread* thread = CompilerThread::current();
  thread->set_task(task);
  CompileLog* log = thread->log();
  if (log != nullptr && !task->is_unloaded()) task->log_task_start(log);
}

CompileTaskWrapper::~CompileTaskWrapper() {
  CompilerThread* thread = CompilerThread::current();
  CompileTask* task = thread->task();
  CompileLog* log = thread->log();
  AbstractCompiler* comp = thread->compiler();
  if (log != nullptr && !task->is_unloaded()) task->log_task_done(log);
  thread->set_task(nullptr);
  thread->set_env(nullptr);
  if (task->is_blocking()) {
    bool free_task = false;
    {
      MutexLocker notifier(thread, task->lock());
      task->mark_complete();
#if INCLUDE_JVMCI
      if (comp->is_jvmci()) {
        if (!task->has_waiter()) {
          // The waiting thread timed out and thus did not free the task.
          free_task = true;
        }
        task->set_blocking_jvmci_compile_state(nullptr);
      }
#endif
      if (!free_task) {
        // Notify the waiting thread that the compilation has completed
        // so that it can free the task.
        task->lock()->notify_all();
      }
    }
    if (free_task) {
      // The task can only be freed once the task lock is released.
      CompileTask::free(task);
    }
  } else {
    task->mark_complete();

    // By convention, the compiling thread is responsible for
    // recycling a non-blocking CompileTask.
    CompileTask::free(task);
  }
}

/**
 * Check if a CompilerThread can be removed and update count if requested.
 */
bool CompileBroker::can_remove(CompilerThread *ct, bool do_it) {
  assert(UseDynamicNumberOfCompilerThreads, "or shouldn't be here");
  if (!ReduceNumberOfCompilerThreads) return false;

  if (RecompilationPolicy::have_recompilation_work()) return false;

  AbstractCompiler *compiler = ct->compiler();
  int compiler_count = compiler->num_compiler_threads();
  bool c1 = compiler->is_c1();

  // Keep at least 1 compiler thread of each type.
  if (compiler_count < 2) return false;

  // Keep thread alive for at least some time.
  if (ct->idle_time_millis() < (c1 ? 500 : 100)) return false;

#if INCLUDE_JVMCI
  if (compiler->is_jvmci() && !UseJVMCINativeLibrary) {
    // Handles for JVMCI thread objects may get released concurrently.
    if (do_it) {
      assert(CompileThread_lock->owner() == ct, "must be holding lock");
    } else {
      // Skip check if it's the last thread and let caller check again.
      return true;
    }
  }
#endif

  // We only allow the last compiler thread of each type to get removed.
  jobject last_compiler = c1 ? compiler1_object(compiler_count - 1)
                             : compiler2_object(compiler_count - 1);
  if (ct->threadObj() == JNIHandles::resolve_non_null(last_compiler)) {
    if (do_it) {
      assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent.
      compiler->set_num_compiler_threads(compiler_count - 1);
#if INCLUDE_JVMCI
      if (compiler->is_jvmci() && !UseJVMCINativeLibrary) {
        // Old j.l.Thread object can die when no longer referenced elsewhere.
        JNIHandles::destroy_global(compiler2_object(compiler_count - 1));
        _compiler2_objects[compiler_count - 1] = nullptr;
      }
#endif
    }
    return true;
  }
  return false;
}
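// A CompileQueue is a doubly-linked list of CompileTasks (_first/_last) guarded
// by its MethodCompileQueue lock. With UseLockFreeCompileQueues, producers may
// instead push tasks onto a non-blocking pending queue (add_pending) without
// taking the lock; compiler threads later drain it under the lock via
// transfer_pending(), discarding tasks that turned out to be stale.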
/**
 * Add a CompileTask to a CompileQueue.
 */
void CompileQueue::add(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");

  task->set_next(nullptr);
  task->set_prev(nullptr);

  if (_last == nullptr) {
    // The compile queue is empty.
    assert(_first == nullptr, "queue is empty");
    _first = task;
    _last = task;
  } else {
    // Append the task to the queue.
    assert(_last->next() == nullptr, "not last");
    _last->set_next(task);
    task->set_prev(_last);
    _last = task;
  }
  ++_size;
  ++_total_added;
  if (_size > _peak_size) {
    _peak_size = _size;
  }

  // Mark the method as being in the compile queue.
  task->method()->set_queued_for_compilation();

  task->mark_queued(os::elapsed_counter());

  if (CIPrintCompileQueue) {
    print_tty();
  }

  if (LogCompilation && xtty != nullptr) {
    task->log_task_queued();
  }

  if (TrainingData::need_data() &&
      !CDSConfig::is_dumping_final_static_archive()) { // FIXME: !!! MetaspaceShared::preload_and_dump() temporarily enables RecordTraining !!!
    CompileTrainingData* tdata = CompileTrainingData::make(task);
    if (tdata != nullptr) {
      tdata->record_compilation_queued(task);
      task->set_training_data(tdata);
    }
  }

  // Notify CompilerThreads that a task is available.
  _lock->notify_all();
}

void CompileQueue::add_pending(CompileTask* task) {
  assert(_lock->owned_by_self() == false, "must NOT own lock");
  assert(UseLockFreeCompileQueues, "");
  task->method()->set_queued_for_compilation();
  _queue.push(*task);
  // FIXME: additional coordination needed? e.g., is it possible for compiler thread to block w/o processing pending tasks?
  if (is_empty()) {
    MutexLocker ml(_lock);
    _lock->notify_all();
  }
}

static bool process_pending(CompileTask* task) {
  // guarantee(task->method()->queued_for_compilation(), "");
  if (task->is_unloaded()) {
    return true; // unloaded
  }
  task->method()->set_queued_for_compilation(); // FIXME
  if (task->method()->pending_queue_processed()) {
    return true; // already queued
  }
  // Mark the method as being in the compile queue.
  task->method()->set_pending_queue_processed();
  if (CompileBroker::compilation_is_complete(task->method(), task->osr_bci(), task->comp_level(),
                                             task->requires_online_compilation(), task->compile_reason())) {
    return true; // already compiled
  }
  return false; // active
}

void CompileQueue::transfer_pending() {
  assert(_lock->owned_by_self(), "must own lock");
  while (!_queue.empty()) {
    CompileTask* task = _queue.pop();
    bool is_stale = process_pending(task);
    if (is_stale) {
      task->set_next(_first_stale);
      task->set_prev(nullptr);
      _first_stale = task;
    } else {
      add(task);
    }
  }
}

/**
 * Empties compilation queue by putting all compilation tasks onto
 * a freelist. Furthermore, the method wakes up all threads that are
 * waiting on a compilation task to finish. This can happen if background
 * compilation is disabled.
 */
void CompileQueue::free_all() {
  MutexLocker mu(_lock);
  transfer_pending();

  CompileTask* next = _first;

  // Iterate over all tasks in the compile queue
  while (next != nullptr) {
    CompileTask* current = next;
    next = current->next();
    {
      // Wake up thread that blocks on the compile task.
      MutexLocker ct_lock(current->lock());
      current->lock()->notify();
    }
    // Put the task back on the freelist.
    CompileTask::free(current);
  }
  _first = nullptr;
  _last = nullptr;

  // Wake up all threads that block on the queue.
  _lock->notify_all();
}

/**
 * Get the next CompileTask from a CompileQueue
 */
CompileTask* CompileQueue::get(CompilerThread* thread) {
  // save methods from RedefineClasses across safepoint
  // across compile queue lock below.
  methodHandle save_method;
  methodHandle save_hot_method;

  MonitorLocker locker(_lock);
  transfer_pending();

  RecompilationPolicy::sample_load_average();

  // If _first is null we have no more compile jobs. There are two reasons for
  // having no compile jobs: First, we compiled everything we wanted. Second,
  // we ran out of code cache so compilation has been disabled. In the latter
  // case we perform code cache sweeps to free memory such that we can re-enable
  // compilation.
  while (_first == nullptr) {
    // Exit loop if compilation is disabled forever
    if (CompileBroker::is_compilation_disabled_forever()) {
      return nullptr;
    }

    AbstractCompiler* compiler = thread->compiler();
    guarantee(compiler != nullptr, "Compiler object must exist");
    compiler->on_empty_queue(this, thread);
    if (_first != nullptr) {
      // The call to on_empty_queue may have temporarily unlocked the MCQ lock
      // so check again whether any tasks were added to the queue.
      break;
    }

    // If there are no compilation tasks and we can compile new jobs
    // (i.e., there is enough free space in the code cache) there is
    // no need to invoke the GC.
    // We need a timed wait here, since compiler threads can exit if compilation
    // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
    // is not critical and we do not want idle compiler threads to wake up too often.
    locker.wait(5*1000);

    transfer_pending(); // reacquired lock

    if (RecompilationPolicy::have_recompilation_work()) return nullptr;

    if (UseDynamicNumberOfCompilerThreads && _first == nullptr) {
      // Still nothing to compile. Give caller a chance to stop this thread.
      if (CompileBroker::can_remove(CompilerThread::current(), false)) return nullptr;
    }
  }

  if (CompileBroker::is_compilation_disabled_forever()) {
    return nullptr;
  }

  CompileTask* task;
  {
    NoSafepointVerifier nsv;
    task = CompilationPolicy::select_task(this, thread);
    if (task != nullptr) {
      task = task->select_for_compilation();
    }
  }

  if (task != nullptr) {
    // Save method pointers across unlock safepoint. The task is removed from
    // the compilation queue, which is walked during RedefineClasses.
    Thread* thread = Thread::current();
    save_method = methodHandle(thread, task->method());
    save_hot_method = methodHandle(thread, task->hot_method());

    remove(task);
  }
  purge_stale_tasks(); // may temporarily release MCQ lock
  return task;
}

// Clean & deallocate stale compile tasks.
// Temporarily releases MethodCompileQueue lock.
void CompileQueue::purge_stale_tasks() {
  assert(_lock->owned_by_self(), "must own lock");
  if (_first_stale != nullptr) {
    // Stale tasks are purged when MCQ lock is released,
    // but _first_stale updates are protected by MCQ lock.
    // Once task processing starts and MCQ lock is released,
    // other compiler threads can reuse _first_stale.
    CompileTask* head = _first_stale;
    _first_stale = nullptr;
    {
      MutexUnlocker ul(_lock);
      for (CompileTask* task = head; task != nullptr; ) {
        CompileTask* next_task = task->next();
        CompileTaskWrapper ctw(task); // Frees the task
        task->set_failure_reason("stale task");
        task = next_task;
      }
    }
    transfer_pending(); // transfer pending after reacquiring MCQ lock
  }
}

void CompileQueue::remove(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");
  if (task->prev() != nullptr) {
    task->prev()->set_next(task->next());
  } else {
    // task is the first element
    assert(task == _first, "Sanity");
    _first = task->next();
  }

  if (task->next() != nullptr) {
    task->next()->set_prev(task->prev());
  } else {
    // task is the last element
    assert(task == _last, "Sanity");
    _last = task->prev();
  }
  --_size;
  ++_total_removed;
}

void CompileQueue::remove_and_mark_stale(CompileTask* task) {
  assert(_lock->owned_by_self(), "must own lock");
  remove(task);

  // Enqueue the task for reclamation (should be done outside MCQ lock)
  task->set_next(_first_stale);
  task->set_prev(nullptr);
  _first_stale = task;
}

// methods in the compile queue need to be marked as used on the stack
// so that they don't get reclaimed by Redefine Classes
void CompileQueue::mark_on_stack() {
  for (CompileTask* task = _first; task != nullptr; task = task->next()) {
    task->mark_on_stack();
  }
  for (CompileTask* task = _queue.first(); !_queue.is_end(task); task = task->next()) {
    assert(task != nullptr, "");
    task->mark_on_stack();
  }
}

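// Map a compilation level (and SC flag) to the queue that serves it: C2-level
// requests go to the C2 (or C2 SC) queue and C1-level requests go to the C1
// (or C1 SC) queue; the SC variants are only used when SC compiler threads
// are configured (_sc_count > 0).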
CompileQueue* CompileBroker::compile_queue(int comp_level, bool is_scc) {
  if (is_c2_compile(comp_level)) return ((is_scc && (_sc_count > 0)) ? _sc2_compile_queue : _c2_compile_queue);
  if (is_c1_compile(comp_level)) return ((is_scc && (_sc_count > 0)) ? _sc1_compile_queue : _c1_compile_queue);
  return nullptr;
}

CompileQueue* CompileBroker::c1_compile_queue() {
  return _c1_compile_queue;
}

CompileQueue* CompileBroker::c2_compile_queue() {
  return _c2_compile_queue;
}

void CompileBroker::print_compile_queues(outputStream* st) {
  st->print_cr("Current compiles: ");

  char buf[2000];
  int buflen = sizeof(buf);
  Threads::print_threads_compiling(st, buf, buflen, /* short_form = */ true);

  st->cr();
  if (_c1_compile_queue != nullptr) {
    _c1_compile_queue->print(st);
  }
  if (_c2_compile_queue != nullptr) {
    _c2_compile_queue->print(st);
  }
  if (_c3_compile_queue != nullptr) {
    _c3_compile_queue->print(st);
  }
  if (_sc1_compile_queue != nullptr) {
    _sc1_compile_queue->print(st);
  }
  if (_sc2_compile_queue != nullptr) {
    _sc2_compile_queue->print(st);
  }
}

void CompileQueue::print(outputStream* st) {
  assert_locked_or_safepoint(_lock);
  st->print_cr("%s:", name());
  CompileTask* task = _first;
  if (task == nullptr) {
    st->print_cr("Empty");
  } else {
    while (task != nullptr) {
      task->print(st, nullptr, true, true);
      task = task->next();
    }
  }
  st->cr();
}

void CompileQueue::print_tty() {
  stringStream ss;
  // Dump the compile queue into a buffer before locking the tty
  print(&ss);
  {
    ttyLocker ttyl;
    tty->print("%s", ss.freeze());
  }
}

CompilerCounters::CompilerCounters() {
  _current_method[0] = '\0';
  _compile_type = CompileBroker::no_compile;
}

#if INCLUDE_JFR && COMPILER2_OR_JVMCI
// Appends new compiler phase names to the growable array phase_names (a new
// CompilerPhaseType mapping in compiler/compilerEvent.cpp) and registers it
// with its serializer.
//
// c2 uses an explicit CompilerPhaseType idToPhase mapping in opto/phasetype.hpp,
// so if c2 is used, it should always be registered first.
// This function is called during vm initialization.
static void register_jfr_phasetype_serializer(CompilerType compiler_type) {
  ResourceMark rm;
  static bool first_registration = true;
  if (compiler_type == compiler_jvmci) {
    CompilerEvent::PhaseEvent::get_phase_id("NOT_A_PHASE_NAME", false, false, false);
    first_registration = false;
#ifdef COMPILER2
  } else if (compiler_type == compiler_c2) {
    assert(first_registration, "invariant"); // c2 must be registered first.
    for (int i = 0; i < PHASE_NUM_TYPES; i++) {
      const char* phase_name = CompilerPhaseTypeHelper::to_description((CompilerPhaseType) i);
      CompilerEvent::PhaseEvent::get_phase_id(phase_name, false, false, false);
    }
    first_registration = false;
#endif // COMPILER2
  }
}
#endif // INCLUDE_JFR && COMPILER2_OR_JVMCI

// ------------------------------------------------------------------
// CompileBroker::compilation_init
//
// Initialize the Compilation object
void CompileBroker::compilation_init(JavaThread* THREAD) {
  // No need to initialize compilation system if we do not use it.
  if (!UseCompiler) {
    return;
  }
  // Set the interface to the current compiler(s).
  _c1_count = CompilationPolicy::c1_count();
  _c2_count = CompilationPolicy::c2_count();
  _c3_count = CompilationPolicy::c3_count();
  _sc_count = CompilationPolicy::sc_count();

#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    // This is creating a JVMCICompiler singleton.
    JVMCICompiler* jvmci = new JVMCICompiler();

    if (UseJVMCICompiler) {
      _compilers[1] = jvmci;
      if (FLAG_IS_DEFAULT(JVMCIThreads)) {
        if (BootstrapJVMCI) {
          // JVMCI will bootstrap so give it more threads
          _c2_count = MIN2(32, os::active_processor_count());
        }
      } else {
        _c2_count = JVMCIThreads;
      }
      if (FLAG_IS_DEFAULT(JVMCIHostThreads)) {
      } else {
#ifdef COMPILER1
        _c1_count = JVMCIHostThreads;
#endif // COMPILER1
      }
#ifdef COMPILER2
      if (SCCache::is_on() && (_c3_count > 0)) {
        _compilers[2] = new C2Compiler();
      }
#endif
    }
  }
#endif // INCLUDE_JVMCI

#ifdef COMPILER1
  if (_c1_count > 0) {
    _compilers[0] = new Compiler();
  }
#endif // COMPILER1

#ifdef COMPILER2
  if (true JVMCI_ONLY( && !UseJVMCICompiler)) {
    if (_c2_count > 0) {
      _compilers[1] = new C2Compiler();
      // Register c2 first as c2 CompilerPhaseType idToPhase mapping is explicit.
      // idToPhase mapping for c2 is in opto/phasetype.hpp
      JFR_ONLY(register_jfr_phasetype_serializer(compiler_c2);)
    }
  }
#endif // COMPILER2

#if INCLUDE_JVMCI
  // Register after c2 registration.
  // JVMCI CompilerPhaseType idToPhase mapping is dynamic.
  if (EnableJVMCI) {
    JFR_ONLY(register_jfr_phasetype_serializer(compiler_jvmci);)
  }
#endif // INCLUDE_JVMCI

  if (CompilerOracle::should_collect_memstat()) {
    CompilationMemoryStatistic::initialize();
  }

  // Start the compiler thread(s)
  init_compiler_threads();
  // totalTime performance counter is always created as it is required
  // by the implementation of java.lang.management.CompilationMXBean.
  {
    // Ensure OOM leads to vm_exit_during_initialization.
    EXCEPTION_MARK;
    _perf_total_compilation =
        PerfDataManager::create_counter(JAVA_CI, "totalTime",
                                        PerfData::U_Ticks, CHECK);
  }

  if (UsePerfData) {

    EXCEPTION_MARK;

    // create the jvmstat performance counters
    _perf_osr_compilation =
        PerfDataManager::create_counter(SUN_CI, "osrTime",
                                        PerfData::U_Ticks, CHECK);

    _perf_standard_compilation =
        PerfDataManager::create_counter(SUN_CI, "standardTime",
                                        PerfData::U_Ticks, CHECK);

    _perf_total_bailout_count =
        PerfDataManager::create_counter(SUN_CI, "totalBailouts",
                                        PerfData::U_Events, CHECK);

    _perf_total_invalidated_count =
        PerfDataManager::create_counter(SUN_CI, "totalInvalidates",
                                        PerfData::U_Events, CHECK);

    _perf_total_compile_count =
        PerfDataManager::create_counter(SUN_CI, "totalCompiles",
                                        PerfData::U_Events, CHECK);
    _perf_total_osr_compile_count =
        PerfDataManager::create_counter(SUN_CI, "osrCompiles",
                                        PerfData::U_Events, CHECK);

    _perf_total_standard_compile_count =
        PerfDataManager::create_counter(SUN_CI, "standardCompiles",
                                        PerfData::U_Events, CHECK);

    _perf_sum_osr_bytes_compiled =
        PerfDataManager::create_counter(SUN_CI, "osrBytes",
                                        PerfData::U_Bytes, CHECK);

    _perf_sum_standard_bytes_compiled =
        PerfDataManager::create_counter(SUN_CI, "standardBytes",
                                        PerfData::U_Bytes, CHECK);

    _perf_sum_nmethod_size =
        PerfDataManager::create_counter(SUN_CI, "nmethodSize",
                                        PerfData::U_Bytes, CHECK);

    _perf_sum_nmethod_code_size =
        PerfDataManager::create_counter(SUN_CI, "nmethodCodeSize",
                                        PerfData::U_Bytes, CHECK);

    _perf_last_method =
        PerfDataManager::create_string_variable(SUN_CI, "lastMethod",
                                                CompilerCounters::cmname_buffer_length,
                                                "", CHECK);

    _perf_last_failed_method =
        PerfDataManager::create_string_variable(SUN_CI, "lastFailedMethod",
                                                CompilerCounters::cmname_buffer_length,
                                                "", CHECK);

    _perf_last_invalidated_method =
        PerfDataManager::create_string_variable(SUN_CI, "lastInvalidatedMethod",
                                                CompilerCounters::cmname_buffer_length,
                                                "", CHECK);

    _perf_last_compile_type =
        PerfDataManager::create_variable(SUN_CI, "lastType",
                                         PerfData::U_None,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);

    _perf_last_compile_size =
        PerfDataManager::create_variable(SUN_CI, "lastSize",
                                         PerfData::U_Bytes,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);


    _perf_last_failed_type =
        PerfDataManager::create_variable(SUN_CI, "lastFailedType",
                                         PerfData::U_None,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);

    _perf_last_invalidated_type =
        PerfDataManager::create_variable(SUN_CI, "lastInvalidatedType",
                                         PerfData::U_None,
                                         (jlong)CompileBroker::no_compile,
                                         CHECK);
  }

  log_info(scc, init)("CompileBroker is initialized");
  _initialized = true;
}

Handle CompileBroker::create_thread_oop(const char* name, TRAPS) {
  Handle thread_oop = JavaThread::create_system_thread_object(name, CHECK_NH);
  return thread_oop;
}

void TrainingReplayThread::training_replay_thread_entry(JavaThread* thread, TRAPS) {
  CompilationPolicy::replay_training_at_init_loop(thread);
}

#if defined(ASSERT) && COMPILER2_OR_JVMCI
// Stress testing. Dedicated threads revert optimizations based on escape analysis concurrently to
// the running java application. Configured with vm options DeoptimizeObjectsALot*.
class DeoptimizeObjectsALotThread : public JavaThread {

  static void deopt_objs_alot_thread_entry(JavaThread* thread, TRAPS);
  void deoptimize_objects_alot_loop_single();
  void deoptimize_objects_alot_loop_all();

public:
  DeoptimizeObjectsALotThread() : JavaThread(&deopt_objs_alot_thread_entry) { }

  bool is_hidden_from_external_view() const { return true; }
};

// Entry for DeoptimizeObjectsALotThread. The threads are started in
// CompileBroker::init_compiler_threads() iff DeoptimizeObjectsALot is enabled
void DeoptimizeObjectsALotThread::deopt_objs_alot_thread_entry(JavaThread* thread, TRAPS) {
  DeoptimizeObjectsALotThread* dt = ((DeoptimizeObjectsALotThread*) thread);
  bool enter_single_loop;
  {
    MonitorLocker ml(dt, EscapeBarrier_lock, Mutex::_no_safepoint_check_flag);
    static int single_thread_count = 0;
    enter_single_loop = single_thread_count++ < DeoptimizeObjectsALotThreadCountSingle;
  }
  if (enter_single_loop) {
    dt->deoptimize_objects_alot_loop_single();
  } else {
    dt->deoptimize_objects_alot_loop_all();
  }
}

// Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each
// barrier targets a single thread which is selected round robin.
void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_single() {
  HandleMark hm(this);
  while (true) {
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *deoptee_thread = jtiwh.next(); ) {
      { // Begin new scope for escape barrier
        HandleMarkCleaner hmc(this);
        ResourceMark rm(this);
        EscapeBarrier eb(true, this, deoptee_thread);
        eb.deoptimize_objects(100);
      }
      // Now sleep after the escape barrier's destructor resumed deoptee_thread.
      sleep(DeoptimizeObjectsALotInterval);
    }
  }
}

// Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each
// barrier targets all java threads in the vm at once.
void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_all() {
  HandleMark hm(this);
  while (true) {
    { // Begin new scope for escape barrier
      HandleMarkCleaner hmc(this);
      ResourceMark rm(this);
      EscapeBarrier eb(true, this);
      eb.deoptimize_objects_all_threads();
    }
    // Now sleep after the escape barrier's destructor resumed the java threads.
    sleep(DeoptimizeObjectsALotInterval);
  }
}
#endif // defined(ASSERT) && COMPILER2_OR_JVMCI


JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD) {
  Handle thread_oop(THREAD, JNIHandles::resolve_non_null(thread_handle));

  if (java_lang_Thread::thread(thread_oop()) != nullptr) {
    assert(type == compiler_t, "should only happen with reused compiler threads");
    // The compiler thread hasn't actually exited yet so don't try to reuse it
    return nullptr;
  }

  JavaThread* new_thread = nullptr;
  switch (type) {
    case compiler_t:
      assert(comp != nullptr, "Compiler instance missing.");
      if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) {
        CompilerCounters* counters = new CompilerCounters();
        new_thread = new CompilerThread(queue, counters);
      }
      break;
#if defined(ASSERT) && COMPILER2_OR_JVMCI
    case deoptimizer_t:
      new_thread = new DeoptimizeObjectsALotThread();
      break;
#endif // ASSERT
    case training_replay_t:
      new_thread = new TrainingReplayThread();
      break;
    default:
      ShouldNotReachHere();
  }

  // At this point the new CompilerThread data-races with this startup
  // thread (which is the main thread and NOT the VM thread).
  // This means Java bytecodes being executed at startup can
  // queue compile jobs which will run at whatever default priority the
  // newly created CompilerThread runs at.


  // At this point it may be possible that no osthread was created for the
  // JavaThread due to lack of resources. We will handle that failure below.
  // Also check new_thread so that static analysis is happy.
  if (new_thread != nullptr && new_thread->osthread() != nullptr) {

    if (type == compiler_t) {
      CompilerThread::cast(new_thread)->set_compiler(comp);
    }

    // Note that we cannot call os::set_priority because it expects Java
    // priorities and we are *explicitly* using OS priorities so that it's
    // possible to set the compiler thread priority higher than any Java
    // thread.

    int native_prio = CompilerThreadPriority;
    if (native_prio == -1) {
      if (UseCriticalCompilerThreadPriority) {
        native_prio = os::java_to_os_priority[CriticalPriority];
      } else {
        native_prio = os::java_to_os_priority[NearMaxPriority];
      }
    }
    os::set_native_priority(new_thread, native_prio);

    // Note that this only sets the JavaThread _priority field, which by
    // definition is limited to Java priorities and not OS priorities.
    JavaThread::start_internal_daemon(THREAD, new_thread, thread_oop, NearMaxPriority);

  } else { // osthread initialization failure
    if (UseDynamicNumberOfCompilerThreads && type == compiler_t
        && comp->num_compiler_threads() > 0) {
      // The new thread is not known to Thread-SMR yet so we can just delete.
      delete new_thread;
      return nullptr;
    } else {
      vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                    os::native_thread_creation_failed_msg());
    }
  }

  os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)

  return new_thread;
}

static bool trace_compiler_threads() {
  LogTarget(Debug, jit, thread) lt;
  return TraceCompilerThreads || lt.is_enabled();
}

static jobject create_compiler_thread(AbstractCompiler* compiler, int i, TRAPS) {
  char name_buffer[256];
  os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", compiler->name(), i);
  Handle thread_oop = JavaThread::create_system_thread_object(name_buffer, CHECK_NULL);
  return JNIHandles::make_global(thread_oop);
}

static void print_compiler_threads(stringStream& msg) {
  if (TraceCompilerThreads) {
    tty->print_cr("%7d %s", (int)tty->time_stamp().milliseconds(), msg.as_string());
  }
  LogTarget(Debug, jit, thread) lt;
  if (lt.is_enabled()) {
    LogStream ls(lt);
    ls.print_cr("%s", msg.as_string());
  }
}

static void print_compiler_thread(JavaThread *ct) {
  if (trace_compiler_threads()) {
    ResourceMark rm;
    ThreadsListHandle tlh;  // name() depends on the TLH.
    assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
    stringStream msg;
    msg.print("Added initial compiler thread %s", ct->name());
    print_compiler_threads(msg);
  }
}

void CompileBroker::init_compiler_threads() {
  // Ensure any exceptions lead to vm_exit_during_initialization.
  EXCEPTION_MARK;
#if !defined(ZERO)
  assert(_c2_count > 0 || _c1_count > 0, "No compilers?");
#endif // !ZERO
  // Initialize the compilation queue
  if (_c2_count > 0) {
"JVMCI compile queue" :) "C2 compile queue"; 1087 _c2_compile_queue = new CompileQueue(name, MethodCompileQueueC2_lock); 1088 _compiler2_objects = NEW_C_HEAP_ARRAY(jobject, _c2_count, mtCompiler); 1089 _compiler2_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c2_count, mtCompiler); 1090 } 1091 if (_c1_count > 0) { 1092 _c1_compile_queue = new CompileQueue("C1 compile queue", MethodCompileQueueC1_lock); 1093 _compiler1_objects = NEW_C_HEAP_ARRAY(jobject, _c1_count, mtCompiler); 1094 _compiler1_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c1_count, mtCompiler); 1095 } 1096 1097 if (_c3_count > 0) { 1098 const char* name = "C2 compile queue"; 1099 _c3_compile_queue = new CompileQueue(name, MethodCompileQueueC3_lock); 1100 _compiler3_objects = NEW_C_HEAP_ARRAY(jobject, _c3_count, mtCompiler); 1101 _compiler3_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c3_count, mtCompiler); 1102 } 1103 if (_sc_count > 0) { 1104 if (_c1_count > 0) { // C1 is present 1105 _sc1_compile_queue = new CompileQueue("C1 SC compile queue", MethodCompileQueueSC1_lock); 1106 } 1107 if (_c2_count > 0) { // C2 is present 1108 _sc2_compile_queue = new CompileQueue("C2 SC compile queue", MethodCompileQueueSC2_lock); 1109 } 1110 _sc_objects = NEW_C_HEAP_ARRAY(jobject, _sc_count, mtCompiler); 1111 _sc_logs = NEW_C_HEAP_ARRAY(CompileLog*, _sc_count, mtCompiler); 1112 } 1113 char name_buffer[256]; 1114 1115 for (int i = 0; i < _c2_count; i++) { 1116 // Create a name for our thread. 1117 jobject thread_handle = create_compiler_thread(_compilers[1], i, CHECK); 1118 _compiler2_objects[i] = thread_handle; 1119 _compiler2_logs[i] = nullptr; 1120 1121 if (!UseDynamicNumberOfCompilerThreads || i == 0) { 1122 JavaThread *ct = make_thread(compiler_t, thread_handle, _c2_compile_queue, _compilers[1], THREAD); 1123 assert(ct != nullptr, "should have been handled for initial thread"); 1124 _compilers[1]->set_num_compiler_threads(i + 1); 1125 print_compiler_thread(ct); 1126 } 1127 } 1128 1129 for (int i = 0; i < _c1_count; i++) { 1130 // Create a name for our thread. 1131 jobject thread_handle = create_compiler_thread(_compilers[0], i, CHECK); 1132 _compiler1_objects[i] = thread_handle; 1133 _compiler1_logs[i] = nullptr; 1134 1135 if (!UseDynamicNumberOfCompilerThreads || i == 0) { 1136 JavaThread *ct = make_thread(compiler_t, thread_handle, _c1_compile_queue, _compilers[0], THREAD); 1137 assert(ct != nullptr, "should have been handled for initial thread"); 1138 _compilers[0]->set_num_compiler_threads(i + 1); 1139 print_compiler_thread(ct); 1140 } 1141 } 1142 1143 for (int i = 0; i < _c3_count; i++) { 1144 // Create a name for our thread. 
    os::snprintf_checked(name_buffer, sizeof(name_buffer), "C2 CompilerThread%d", i);
    Handle thread_oop = create_thread_oop(name_buffer, CHECK);
    jobject thread_handle = JNIHandles::make_global(thread_oop);
    _compiler3_objects[i] = thread_handle;
    _compiler3_logs[i] = nullptr;

    JavaThread *ct = make_thread(compiler_t, thread_handle, _c3_compile_queue, _compilers[2], THREAD);
    assert(ct != nullptr, "should have been handled for initial thread");
    _compilers[2]->set_num_compiler_threads(i + 1);
    print_compiler_thread(ct);
  }

  if (_sc_count > 0) {
    int i = 0;
    if (_c1_count > 0) { // C1 is present
      os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d SC CompilerThread", 1);
      Handle thread_oop = create_thread_oop(name_buffer, CHECK);
      jobject thread_handle = JNIHandles::make_global(thread_oop);
      _sc_objects[i] = thread_handle;
      _sc_logs[i] = nullptr;
      i++;

      JavaThread *ct = make_thread(compiler_t, thread_handle, _sc1_compile_queue, _compilers[0], THREAD);
      assert(ct != nullptr, "should have been handled for initial thread");
      print_compiler_thread(ct);
    }
    if (_c2_count > 0) { // C2 is present
      os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d SC CompilerThread", 2);
      Handle thread_oop = create_thread_oop(name_buffer, CHECK);
      jobject thread_handle = JNIHandles::make_global(thread_oop);
      _sc_objects[i] = thread_handle;
      _sc_logs[i] = nullptr;

      JavaThread *ct = make_thread(compiler_t, thread_handle, _sc2_compile_queue, _compilers[1], THREAD);
      assert(ct != nullptr, "should have been handled for initial thread");
      print_compiler_thread(ct);
    }
  }

  if (UsePerfData) {
    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, _c1_count + _c2_count + _c3_count, CHECK);
  }

#if defined(ASSERT) && COMPILER2_OR_JVMCI
  if (DeoptimizeObjectsALot) {
    // Initialize and start the object deoptimizer threads
    const int total_count = DeoptimizeObjectsALotThreadCountSingle + DeoptimizeObjectsALotThreadCountAll;
    for (int count = 0; count < total_count; count++) {
      Handle thread_oop = JavaThread::create_system_thread_object("Deoptimize objects a lot single mode", CHECK);
      jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
      make_thread(deoptimizer_t, thread_handle, nullptr, nullptr, THREAD);
    }
  }
#endif // defined(ASSERT) && COMPILER2_OR_JVMCI
}

void CompileBroker::init_training_replay() {
  // Ensure any exceptions lead to vm_exit_during_initialization.
  EXCEPTION_MARK;
  if (TrainingData::have_data()) {
    if (UseConcurrentTrainingReplay) {
      Handle thread_oop = create_thread_oop("Training replay thread", CHECK);
      jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
      make_thread(training_replay_t, thread_handle, nullptr, nullptr, THREAD);
    }
    _replay_initialized = true;
  }
}

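// Dynamically add compiler threads when UseDynamicNumberOfCompilerThreads is
// enabled. The target thread count is derived from the queue length (one
// thread per c2_tasks_per_thread/c1_tasks_per_thread queued tasks) and is
// further capped by available memory and unallocated code cache space; new
// threads are only started if CompileThread_lock can be acquired without
// contention.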
void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) {

  int old_c2_count = 0, new_c2_count = 0, old_c1_count = 0, new_c1_count = 0;
  const int c2_tasks_per_thread = 2, c1_tasks_per_thread = 4;

  // Quick check if we already have enough compiler threads without taking the lock.
  // Numbers may change concurrently, so we read them again after we have the lock.
  if (_c2_compile_queue != nullptr) {
    old_c2_count = get_c2_thread_count();
    new_c2_count = MIN2(_c2_count, _c2_compile_queue->size() / c2_tasks_per_thread);
  }
  if (_c1_compile_queue != nullptr) {
    old_c1_count = get_c1_thread_count();
    new_c1_count = MIN2(_c1_count, _c1_compile_queue->size() / c1_tasks_per_thread);
  }
  if (new_c2_count <= old_c2_count && new_c1_count <= old_c1_count) return;

  // Now, we do the more expensive operations.
  julong free_memory = os::free_memory();
  // If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All).
  size_t available_cc_np = CodeCache::unallocated_capacity(CodeBlobType::MethodNonProfiled),
         available_cc_p  = CodeCache::unallocated_capacity(CodeBlobType::MethodProfiled);

  // Only attempt to start additional threads if the lock is free.
  if (!CompileThread_lock->try_lock()) return;

  if (_c2_compile_queue != nullptr) {
    old_c2_count = get_c2_thread_count();
    new_c2_count = MIN4(_c2_count,
        _c2_compile_queue->size() / c2_tasks_per_thread,
        (int)(free_memory / (200*M)),
        (int)(available_cc_np / (128*K)));

    for (int i = old_c2_count; i < new_c2_count; i++) {
#if INCLUDE_JVMCI
      if (UseJVMCICompiler && !UseJVMCINativeLibrary && _compiler2_objects[i] == nullptr) {
        // Native compiler threads as used in C1/C2 can reuse the j.l.Thread objects as their
        // existence is completely hidden from the rest of the VM (and those compiler threads can't
        // call Java code to do the creation anyway).
        //
        // For pure Java JVMCI we have to create new j.l.Thread objects as they are visible and we
        // can see unexpected thread lifecycle transitions if we bind them to new JavaThreads. For
        // native library JVMCI it's preferred to use the C1/C2 strategy as this avoids unnecessary
        // coupling with Java.
        if (!THREAD->can_call_java()) break;
        char name_buffer[256];
        os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", _compilers[1]->name(), i);
        Handle thread_oop;
        {
          // We have to give up the lock temporarily for the Java calls.
          MutexUnlocker mu(CompileThread_lock);
          thread_oop = JavaThread::create_system_thread_object(name_buffer, THREAD);
        }
        if (HAS_PENDING_EXCEPTION) {
          if (trace_compiler_threads()) {
            ResourceMark rm;
            stringStream msg;
            msg.print_cr("JVMCI compiler thread creation failed:");
            PENDING_EXCEPTION->print_on(&msg);
            print_compiler_threads(msg);
          }
          CLEAR_PENDING_EXCEPTION;
          break;
        }
        // Check if another thread has beaten us during the Java calls.
        if (get_c2_thread_count() != i) break;
        jobject thread_handle = JNIHandles::make_global(thread_oop);
        assert(compiler2_object(i) == nullptr, "Old one must be released!");
        _compiler2_objects[i] = thread_handle;
      }
#endif
      guarantee(compiler2_object(i) != nullptr, "Thread oop must exist");
      JavaThread *ct = make_thread(compiler_t, compiler2_object(i), _c2_compile_queue, _compilers[1], THREAD);
      if (ct == nullptr) break;
      _compilers[1]->set_num_compiler_threads(i + 1);
      if (trace_compiler_threads()) {
        ResourceMark rm;
        ThreadsListHandle tlh;  // name() depends on the TLH.
        assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
        stringStream msg;
        msg.print("Added compiler thread %s (free memory: %dMB, available non-profiled code cache: %dMB)",
                  ct->name(), (int)(free_memory/M), (int)(available_cc_np/M));
        print_compiler_threads(msg);
      }
    }
  }

  if (_c1_compile_queue != nullptr) {
    old_c1_count = get_c1_thread_count();
    new_c1_count = MIN4(_c1_count,
        _c1_compile_queue->size() / c1_tasks_per_thread,
        (int)(free_memory / (100*M)),
        (int)(available_cc_p / (128*K)));

    for (int i = old_c1_count; i < new_c1_count; i++) {
      JavaThread *ct = make_thread(compiler_t, compiler1_object(i), _c1_compile_queue, _compilers[0], THREAD);
      if (ct == nullptr) break;
      _compilers[0]->set_num_compiler_threads(i + 1);
      if (trace_compiler_threads()) {
        ResourceMark rm;
        ThreadsListHandle tlh;  // name() depends on the TLH.
        assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
        stringStream msg;
        msg.print("Added compiler thread %s (free memory: %dMB, available profiled code cache: %dMB)",
                  ct->name(), (int)(free_memory/M), (int)(available_cc_p/M));
        print_compiler_threads(msg);
      }
    }
  }

  CompileThread_lock->unlock();
}


/**
 * Set the methods on the stack as on_stack so that redefine classes doesn't
 * reclaim them. This method is executed at a safepoint.
 */
void CompileBroker::mark_on_stack() {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
  // Since we are at a safepoint, we do not need a lock to access
  // the compile queues.
  if (_c3_compile_queue != nullptr) {
    _c3_compile_queue->mark_on_stack();
  }
  if (_c2_compile_queue != nullptr) {
    _c2_compile_queue->mark_on_stack();
  }
  if (_c1_compile_queue != nullptr) {
    _c1_compile_queue->mark_on_stack();
  }
  if (_sc1_compile_queue != nullptr) {
    _sc1_compile_queue->mark_on_stack();
  }
  if (_sc2_compile_queue != nullptr) {
    _sc2_compile_queue->mark_on_stack();
  }
}

// ------------------------------------------------------------------
// CompileBroker::compile_method
//
// Request compilation of a method.
void CompileBroker::compile_method_base(const methodHandle& method,
                                        int osr_bci,
                                        int comp_level,
                                        const methodHandle& hot_method,
                                        int hot_count,
                                        CompileTask::CompileReason compile_reason,
                                        bool requires_online_compilation,
                                        bool blocking,
                                        Thread* thread) {
  guarantee(!method->is_abstract(), "cannot compile abstract methods");
  assert(method->method_holder()->is_instance_klass(),
         "sanity check");
  assert(!method->method_holder()->is_not_initialized() ||
         compile_reason == CompileTask::Reason_Preload ||
         compile_reason == CompileTask::Reason_Precompile ||
         compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized");
  assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys");

  if (CIPrintRequests) {
    tty->print("request: ");
    method->print_short_name(tty);
    if (osr_bci != InvocationEntryBci) {
      tty->print(" osr_bci: %d", osr_bci);
    }
    tty->print(" level: %d comment: %s count: %d", comp_level, CompileTask::reason_name(compile_reason), hot_count);
    if (!hot_method.is_null()) {
      tty->print(" hot: ");
      if (hot_method() != method()) {
        hot_method->print_short_name(tty);
      } else {
        tty->print("yes");
      }
    }
    tty->cr();
  }

  // A request has been made for compilation. Before we do any
  // real work, check to see if the method has been compiled
  // in the meantime with a definitive result.
  if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
    return;
  }

#ifndef PRODUCT
  if (osr_bci != -1 && !FLAG_IS_DEFAULT(OSROnlyBCI)) {
    if ((OSROnlyBCI > 0) ? (OSROnlyBCI != osr_bci) : (-OSROnlyBCI == osr_bci)) {
      // Positive OSROnlyBCI means only compile that bci. Negative means don't compile that BCI.
      return;
    }
  }
#endif

  // If this method is already in the compile queue, then
  // we do not block the current thread.
  if (compilation_is_in_queue(method)) {
    // We may want to decay our counter a bit here to prevent
    // multiple denied requests for compilation. This is an
    // open compilation policy issue. Note: The other possibility,
    // in the case that this is a blocking compile request, is to have
    // all subsequent blocking requesters wait for completion of
    // ongoing compiles. Note that in this case we'll need a protocol
    // for freeing the associated compile tasks. [Or we could have
    // a single static monitor on which all these waiters sleep.]
    return;
  }

  // Tiered policy requires MethodCounters to exist before adding a method to
  // the queue. Create if we don't have them yet.
  if (compile_reason != CompileTask::Reason_Preload) {
    method->get_method_counters(thread);
  }

  SCCEntry* scc_entry = find_scc_entry(method, osr_bci, comp_level, compile_reason, requires_online_compilation);
  bool is_scc = (scc_entry != nullptr);

  // Outputs from the following MutexLocker block:
  CompileTask* task = nullptr;
  CompileQueue* queue;
#if INCLUDE_JVMCI
  if (is_c2_compile(comp_level) && compiler2()->is_jvmci() && compiler3() != nullptr &&
      ((JVMCICompiler*)compiler2())->force_comp_at_level_simple(method)) {
    assert(_c3_compile_queue != nullptr, "sanity");
    queue = _c3_compile_queue; // JVMCI compiler's methods compilation
  } else
#endif
  queue = compile_queue(comp_level, is_scc);

  // Acquire our lock.
  {
    ConditionalMutexLocker locker(thread, queue->lock(), !UseLockFreeCompileQueues);

    // Make sure the method has not slipped into the queues since
    // last we checked; note that those checks were "fast bail-outs".
    // Here we need to be more careful, see 14012000 below.
    if (compilation_is_in_queue(method)) {
      return;
    }

    // We need to check again to see if the compilation has
    // completed. A previous compilation may have registered
    // some result.
    if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
      return;
    }

    // We now know that this compilation is not pending, complete,
    // or prohibited. Assign a compile_id to this compilation
    // and check to see if it is in our [Start..Stop) range.
    int compile_id = assign_compile_id(method, osr_bci);
    if (compile_id == 0) {
      // The compilation falls outside the allowed range.
      return;
    }

#if INCLUDE_JVMCI
    if (UseJVMCICompiler && blocking) {
      // Don't allow blocking compiles for requests triggered by JVMCI.
      if (thread->is_Compiler_thread()) {
        blocking = false;
      }

      // In libjvmci, JVMCI initialization should not deadlock with other threads
      if (!UseJVMCINativeLibrary) {
        // Don't allow blocking compiles if inside a class initializer or while performing class loading
        vframeStream vfst(JavaThread::cast(thread));
        for (; !vfst.at_end(); vfst.next()) {
          if (vfst.method()->is_static_initializer() ||
              (vfst.method()->method_holder()->is_subclass_of(vmClasses::ClassLoader_klass()) &&
               vfst.method()->name() == vmSymbols::loadClass_name())) {
            blocking = false;
            break;
          }
        }

        // Don't allow blocking compilation requests to JVMCI
        // if JVMCI itself is not yet initialized
        if (!JVMCI::is_compiler_initialized() && compiler(comp_level)->is_jvmci()) {
          blocking = false;
        }
      }

      // Don't allow blocking compilation requests if we are in JVMCIRuntime::shutdown
      // to avoid deadlock between compiler thread(s) and threads run at shutdown
      // such as the DestroyJavaVM thread.
      if (JVMCI::in_shutdown()) {
        blocking = false;
      }
    }
#endif // INCLUDE_JVMCI

    // We will enter the compilation in the queue.
    // 14012000: Note that this sets the queued_for_compile bits in
    // the target method. We can now reason that a method cannot be
    // queued for compilation more than once, as follows:
    // Before a thread queues a task for compilation, it first acquires
    // the compile queue lock, then checks if the method's queued bits
    // are set or it has already been compiled. Thus there can not be two
    // instances of a compilation task for the same method on the
    // compilation queue. Consider now the case where the compilation
    // thread has already removed a task for that method from the queue
    // and is in the midst of compiling it. In this case, the
    // queued_for_compile bits must be set in the method (and these
    // will be visible to the current thread, since the bits were set
    // under protection of the compile queue lock, which we hold now).
    // When the compilation completes, the compiler thread first sets
    // the compilation result and then clears the queued_for_compile
    // bits. Neither of these actions are protected by a barrier (or done
    // under the protection of a lock), so the only guarantee we have
    // (on machines with TSO (Total Store Order)) is that these values
    // will update in that order. As a result, the only combinations of
    // these bits that the current thread will see are, in temporal order:
    // <RESULT, QUEUE> :
    //     <0, 1> : in compile queue, but not yet compiled
    //     <1, 1> : compiled but queue bit not cleared
    //     <1, 0> : compiled and queue bit cleared
    // Because we first check the queue bits then check the result bits,
    // we are assured that we cannot introduce a duplicate task.
    // Note that if we did the tests in the reverse order (i.e. check
    // result then check queued bit), we could get the result bit before
    // the compilation completed, and the queue bit after the compilation
    // completed, and end up introducing a "duplicate" (redundant) task.
    // In that case, the compiler thread should first check if a method
    // has already been compiled before trying to compile it.
    // NOTE: in the event that there are multiple compiler threads and
    // there is de-optimization/recompilation, things will get hairy,
    // and in that case it's best to protect both the testing (here) of
    // these bits, and their updating (here and elsewhere) under a
    // common lock.
    task = create_compile_task(queue,
                               compile_id, method,
                               osr_bci, comp_level,
                               hot_method, hot_count, scc_entry, compile_reason,
                               requires_online_compilation, blocking);

    if (task->is_scc() && (_sc_count > 0)) {
      // Put it on SC queue
      queue = is_c1_compile(comp_level) ? _sc1_compile_queue : _sc2_compile_queue;
    }

    if (UseLockFreeCompileQueues) {
      assert(queue->lock()->owned_by_self() == false, "");
      queue->add_pending(task);
    } else {
      queue->add(task);
    }
  }

  if (blocking) {
    wait_for_completion(task);
  }
}

SCCEntry* CompileBroker::find_scc_entry(const methodHandle& method, int osr_bci, int comp_level,
                                        CompileTask::CompileReason compile_reason,
                                        bool requires_online_compilation) {
  SCCEntry* scc_entry = nullptr;
  if (osr_bci == InvocationEntryBci && !requires_online_compilation && SCCache::is_on_for_read()) {
    // Check for cached code.
1575 if (compile_reason == CompileTask::Reason_Preload) { 1576 scc_entry = method->scc_entry(); 1577 assert(scc_entry != nullptr && scc_entry->for_preload(), "sanity"); 1578 } else { 1579 scc_entry = SCCache::find_code_entry(method, comp_level); 1580 } 1581 } 1582 return scc_entry; 1583 } 1584 1585 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci, 1586 int comp_level, 1587 const methodHandle& hot_method, int hot_count, 1588 bool requires_online_compilation, 1589 CompileTask::CompileReason compile_reason, 1590 TRAPS) { 1591 // Do nothing if compilebroker is not initialized or compiles are submitted on level none 1592 if (!_initialized || comp_level == CompLevel_none) { 1593 return nullptr; 1594 } 1595 1596 #if INCLUDE_JVMCI 1597 if (EnableJVMCI && UseJVMCICompiler && 1598 comp_level == CompLevel_full_optimization && !AOTLinkedClassBulkLoader::class_preloading_finished()) { 1599 return nullptr; 1600 } 1601 #endif 1602 1603 AbstractCompiler *comp = CompileBroker::compiler(comp_level); 1604 assert(comp != nullptr, "Ensure we have a compiler"); 1605 1606 #if INCLUDE_JVMCI 1607 if (comp->is_jvmci() && !JVMCI::can_initialize_JVMCI()) { 1608 // JVMCI compilation is not yet initializable. 1609 return nullptr; 1610 } 1611 #endif 1612 1613 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp); 1614 // CompileBroker::compile_method can trap and can have pending async exception. 1615 nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_method, hot_count, requires_online_compilation, compile_reason, directive, THREAD); 1616 DirectivesStack::release(directive); 1617 return nm; 1618 } 1619 1620 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci, 1621 int comp_level, 1622 const methodHandle& hot_method, int hot_count, 1623 bool requires_online_compilation, 1624 CompileTask::CompileReason compile_reason, 1625 DirectiveSet* directive, 1626 TRAPS) { 1627 1628 // make sure arguments make sense 1629 assert(method->method_holder()->is_instance_klass(), "not an instance method"); 1630 assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range"); 1631 assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods"); 1632 assert(!method->method_holder()->is_not_initialized() || 1633 compile_reason == CompileTask::Reason_Preload || 1634 compile_reason == CompileTask::Reason_Precompile || 1635 compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized"); 1636 // return quickly if possible 1637 1638 if (PrecompileOnlyAndExit && !CompileTask::reason_is_precompiled(compile_reason)) { 1639 return nullptr; 1640 } 1641 1642 // lock, make sure that the compilation 1643 // isn't prohibited in a straightforward way. 
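  // Bail out early if there is no compiler for this level or the compilation
  // is prohibited (e.g. the method is excluded via a compiler directive).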
  AbstractCompiler* comp = CompileBroker::compiler(comp_level);
  if (comp == nullptr || compilation_is_prohibited(method, osr_bci, comp_level, directive->ExcludeOption)) {
    return nullptr;
  }

  if (osr_bci == InvocationEntryBci) {
    // standard compilation
    nmethod* method_code = method->code();
    if (method_code != nullptr) {
      if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
        return method_code;
      }
    }
    if (method->is_not_compilable(comp_level)) {
      return nullptr;
    }
  } else {
    // osr compilation
    // We accept a higher level osr method
    nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
    if (nm != nullptr) return nm;
    if (method->is_not_osr_compilable(comp_level)) return nullptr;
  }

  assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
  // some prerequisites that are compiler specific
  if (compile_reason != CompileTask::Reason_Preload && (comp->is_c2() || comp->is_jvmci())) {
    InternalOOMEMark iom(THREAD);
    method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NONASYNC_NULL);
    // Resolve all classes seen in the signature of the method
    // we are compiling.
    Method::load_signature_classes(method, CHECK_AND_CLEAR_NONASYNC_NULL);
  }

  // If the method is native, do the lookup in the thread requesting
  // the compilation. Native lookups can load code, which is not
  // permitted during compilation.
  //
  // Note: A native method implies non-osr compilation which is
  // checked with an assertion at the entry of this method.
  if (method->is_native() && !method->is_method_handle_intrinsic()) {
    address adr = NativeLookup::lookup(method, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      // In case of an exception looking up the method, we just forget
      // about it. The interpreter will kick in and throw the exception.
      method->set_not_compilable("NativeLookup::lookup failed"); // implies is_not_osr_compilable()
      CLEAR_PENDING_EXCEPTION;
      return nullptr;
    }
    assert(method->has_native_function(), "must have native code by now");
  }

  // RedefineClasses() has replaced this method; just return
  if (method->is_old()) {
    return nullptr;
  }

  // JVMTI -- post_compile_event requires jmethod_id() that may require
  // a lock the compiling thread cannot acquire. Prefetch it here.
  if (JvmtiExport::should_post_compiled_method_load()) {
    method->jmethod_id();
  }

  // do the compilation
  if (method->is_native()) {
    if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) {
#if defined(X86) && !defined(ZERO)
      // The following native methods:
      //
      //   java.lang.Float.intBitsToFloat
      //   java.lang.Float.floatToRawIntBits
      //   java.lang.Double.longBitsToDouble
      //   java.lang.Double.doubleToRawLongBits
      //
      // are called through the interpreter even if interpreter native stubs
      // are not preferred (i.e., calling through adapter handlers is preferred).
      // The reason is that on x86_32 signaling NaNs (sNaNs) are not preserved
      // if the version of the methods from the native libraries is called.
      // As the interpreter and the C2-intrinsified versions of these methods
      // preserve sNaNs, calling the library versions would result in
      // inconsistent handling of sNaNs.
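      // UseSSE >= 1 guards the float intrinsics and UseSSE >= 2 the double
      // ones (SSE provides single-precision, SSE2 double-precision support).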
      if ((UseSSE >= 1 &&
           (method->intrinsic_id() == vmIntrinsics::_intBitsToFloat ||
            method->intrinsic_id() == vmIntrinsics::_floatToRawIntBits)) ||
          (UseSSE >= 2 &&
           (method->intrinsic_id() == vmIntrinsics::_longBitsToDouble ||
            method->intrinsic_id() == vmIntrinsics::_doubleToRawLongBits))) {
        return nullptr;
      }
#endif // X86 && !ZERO

      // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that
      // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime).
      //
      // Since normal compiled-to-compiled calls are not able to handle such a thing, we MUST generate an adapter
      // in this case. If we can't generate one and use it, we cannot execute the out-of-line method handle calls.
      AdapterHandlerLibrary::create_native_wrapper(method);
    } else {
      return nullptr;
    }
  } else {
    // If the compiler is shut off due to the code cache getting full,
    // fail out now so blocking compiles don't hang the Java thread.
    if (!should_compile_new_jobs()) {
      return nullptr;
    }
    bool is_blocking = ReplayCompiles ||
                       !directive->BackgroundCompilationOption ||
                       (compile_reason == CompileTask::Reason_Precompile) ||
                       (compile_reason == CompileTask::Reason_PrecompileForPreload);
    compile_method_base(method, osr_bci, comp_level, hot_method, hot_count, compile_reason, requires_online_compilation, is_blocking, THREAD);
  }

  // return requested nmethod
  // We accept a higher level osr method
  if (osr_bci == InvocationEntryBci) {
    return method->code();
  }
  return method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
}


// ------------------------------------------------------------------
// CompileBroker::compilation_is_complete
//
// See if compilation of this method is already complete.
bool CompileBroker::compilation_is_complete(Method* method,
                                            int osr_bci,
                                            int comp_level,
                                            bool online_only,
                                            CompileTask::CompileReason compile_reason) {
  if (compile_reason == CompileTask::Reason_Precompile ||
      compile_reason == CompileTask::Reason_PrecompileForPreload) {
    return false; // FIXME: any restrictions?
  }
  bool is_osr = (osr_bci != standard_entry_bci);
  if (is_osr) {
    if (method->is_not_osr_compilable(comp_level)) {
      return true;
    } else {
      nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true);
      return (result != nullptr);
    }
  } else {
    if (method->is_not_compilable(comp_level)) {
      return true;
    } else {
      nmethod* result = method->code();
      if (result == nullptr) {
        return false;
      }
      if (online_only && result->is_scc()) {
        return false;
      }
      bool same_level = (comp_level == result->comp_level());
      if (result->has_clinit_barriers()) {
        return !same_level; // Allow replacing preloaded code with new code of the same level
      }
      return same_level;
    }
  }
}


/**
 * See if this compilation is already requested.
 *
 * Implementation note: there is only a single "is in queue" bit
 * for each method. This means that the check below is overly
 * conservative in the sense that an osr compilation in the queue
 * will block a normal compilation from entering the queue (and vice
 * versa). This can be remedied by a full queue search to disambiguate
 * cases.
 * If it is deemed profitable, this may be done.
 */
bool CompileBroker::compilation_is_in_queue(const methodHandle& method) {
  return method->queued_for_compilation();
}

// ------------------------------------------------------------------
// CompileBroker::compilation_is_prohibited
//
// See if this compilation is not allowed.
bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded) {
  bool is_native = method->is_native();
  // Some compilers may not support the compilation of natives.
  AbstractCompiler *comp = compiler(comp_level);
  if (is_native && (!CICompileNatives || comp == nullptr)) {
    method->set_not_compilable_quietly("native methods not supported", comp_level);
    return true;
  }

  bool is_osr = (osr_bci != standard_entry_bci);
  // Some compilers may not support on stack replacement.
  if (is_osr && (!CICompileOSR || comp == nullptr)) {
    method->set_not_osr_compilable("OSR not supported", comp_level);
    return true;
  }

  // The method may be explicitly excluded by the user.
  double scale;
  if (excluded || (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, scale) && scale == 0)) {
    bool quietly = CompilerOracle::be_quiet();
    if (PrintCompilation && !quietly) {
      // This does not happen quietly...
      ResourceMark rm;
      tty->print("### Excluding %s:%s",
                 method->is_native() ? "generation of native wrapper" : "compile",
                 (method->is_static() ? " static" : ""));
      method->print_short_name(tty);
      tty->cr();
    }
    method->set_not_compilable("excluded by CompileCommand", comp_level, !quietly);
  }

  return false;
}

/**
 * Generate serialized IDs for compilation requests. If certain debugging flags are used
 * and the ID is not within the specified range, the method is not compiled and 0 is returned.
 * The function also allows generating separate compilation IDs for OSR compilations.
 */
int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) {
#ifdef ASSERT
  bool is_osr = (osr_bci != standard_entry_bci);
  int id;
  if (method->is_native()) {
    assert(!is_osr, "can't be osr");
    // Adapters, native wrappers and method handle intrinsics
    // should always be generated.
    return Atomic::add(CICountNative ? &_native_compilation_id : &_compilation_id, 1);
  } else if (CICountOSR && is_osr) {
    id = Atomic::add(&_osr_compilation_id, 1);
    if (CIStartOSR <= id && id < CIStopOSR) {
      return id;
    }
  } else {
    id = Atomic::add(&_compilation_id, 1);
    if (CIStart <= id && id < CIStop) {
      return id;
    }
  }

  // Method was not in the appropriate compilation range.
  method->set_not_compilable_quietly("Not in requested compile id range");
  return 0;
#else
  // CICountOSR is a develop flag and set to 'false' by default. In a product build,
  // only _compilation_id is incremented.
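  // In debug builds, by contrast, the CIStart/CIStop (and CIStartOSR/CIStopOSR)
  // ranges above can be used to bisect a miscompile: only ids in
  // [CIStart, CIStop) are compiled, e.g. -XX:CIStart=0 -XX:CIStop=500
  // (example values only); out-of-range requests are marked not compilable.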
1892 return Atomic::add(&_compilation_id, 1); 1893 #endif 1894 } 1895 1896 // ------------------------------------------------------------------ 1897 // CompileBroker::assign_compile_id_unlocked 1898 // 1899 // Public wrapper for assign_compile_id that acquires the needed locks 1900 int CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci) { 1901 return assign_compile_id(method, osr_bci); 1902 } 1903 1904 // ------------------------------------------------------------------ 1905 // CompileBroker::create_compile_task 1906 // 1907 // Create a CompileTask object representing the current request for 1908 // compilation. Add this task to the queue. 1909 CompileTask* CompileBroker::create_compile_task(CompileQueue* queue, 1910 int compile_id, 1911 const methodHandle& method, 1912 int osr_bci, 1913 int comp_level, 1914 const methodHandle& hot_method, 1915 int hot_count, 1916 SCCEntry* scc_entry, 1917 CompileTask::CompileReason compile_reason, 1918 bool requires_online_compilation, 1919 bool blocking) { 1920 CompileTask* new_task = CompileTask::allocate(); 1921 new_task->initialize(compile_id, method, osr_bci, comp_level, 1922 hot_method, hot_count, scc_entry, compile_reason, queue, 1923 requires_online_compilation, blocking); 1924 return new_task; 1925 } 1926 1927 #if INCLUDE_JVMCI 1928 // The number of milliseconds to wait before checking if 1929 // JVMCI compilation has made progress. 1930 static const long JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE = 1000; 1931 1932 // The number of JVMCI compilation progress checks that must fail 1933 // before unblocking a thread waiting for a blocking compilation. 1934 static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10; 1935 1936 /** 1937 * Waits for a JVMCI compiler to complete a given task. This thread 1938 * waits until either the task completes or it sees no JVMCI compilation 1939 * progress for N consecutive milliseconds where N is 1940 * JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE * 1941 * JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS. 1942 * 1943 * @return true if this thread needs to free/recycle the task 1944 */ 1945 bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) { 1946 assert(UseJVMCICompiler, "sanity"); 1947 MonitorLocker ml(thread, task->lock()); 1948 int progress_wait_attempts = 0; 1949 jint thread_jvmci_compilation_ticks = 0; 1950 jint global_jvmci_compilation_ticks = jvmci->global_compilation_ticks(); 1951 while (!task->is_complete() && !is_compilation_disabled_forever() && 1952 ml.wait(JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE)) { 1953 JVMCICompileState* jvmci_compile_state = task->blocking_jvmci_compile_state(); 1954 1955 bool progress; 1956 if (jvmci_compile_state != nullptr) { 1957 jint ticks = jvmci_compile_state->compilation_ticks(); 1958 progress = (ticks - thread_jvmci_compilation_ticks) != 0; 1959 JVMCI_event_1("waiting on compilation %d [ticks=%d]", task->compile_id(), ticks); 1960 thread_jvmci_compilation_ticks = ticks; 1961 } else { 1962 // Still waiting on JVMCI compiler queue. This thread may be holding a lock 1963 // that all JVMCI compiler threads are blocked on. We use the global JVMCI 1964 // compilation ticks to determine whether JVMCI compilation 1965 // is still making progress through the JVMCI compiler queue. 
1966 jint ticks = jvmci->global_compilation_ticks(); 1967 progress = (ticks - global_jvmci_compilation_ticks) != 0; 1968 JVMCI_event_1("waiting on compilation %d to be queued [ticks=%d]", task->compile_id(), ticks); 1969 global_jvmci_compilation_ticks = ticks; 1970 } 1971 1972 if (!progress) { 1973 if (++progress_wait_attempts == JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS) { 1974 if (PrintCompilation) { 1975 task->print(tty, "wait for blocking compilation timed out"); 1976 } 1977 JVMCI_event_1("waiting on compilation %d timed out", task->compile_id()); 1978 break; 1979 } 1980 } else { 1981 progress_wait_attempts = 0; 1982 } 1983 } 1984 task->clear_waiter(); 1985 return task->is_complete(); 1986 } 1987 #endif 1988 1989 /** 1990 * Wait for the compilation task to complete. 1991 */ 1992 void CompileBroker::wait_for_completion(CompileTask* task) { 1993 if (CIPrintCompileQueue) { 1994 ttyLocker ttyl; 1995 tty->print_cr("BLOCKING FOR COMPILE"); 1996 } 1997 1998 assert(task->is_blocking(), "can only wait on blocking task"); 1999 2000 JavaThread* thread = JavaThread::current(); 2001 2002 methodHandle method(thread, task->method()); 2003 bool free_task; 2004 #if INCLUDE_JVMCI 2005 AbstractCompiler* comp = compiler(task->comp_level()); 2006 if (!UseJVMCINativeLibrary && comp->is_jvmci() && !task->should_wait_for_compilation()) { 2007 // It may return before compilation is completed. 2008 // Note that libjvmci should not pre-emptively unblock 2009 // a thread waiting for a compilation as it does not call 2010 // Java code and so is not deadlock prone like jarjvmci. 2011 free_task = wait_for_jvmci_completion((JVMCICompiler*) comp, task, thread); 2012 } else 2013 #endif 2014 { 2015 MonitorLocker ml(thread, task->lock()); 2016 free_task = true; 2017 while (!task->is_complete() && !is_compilation_disabled_forever()) { 2018 ml.wait(); 2019 } 2020 } 2021 2022 if (free_task) { 2023 if (is_compilation_disabled_forever()) { 2024 CompileTask::free(task); 2025 return; 2026 } 2027 2028 // It is harmless to check this status without the lock, because 2029 // completion is a stable property (until the task object is recycled). 2030 assert(task->is_complete(), "Compilation should have completed"); 2031 2032 // By convention, the waiter is responsible for recycling a 2033 // blocking CompileTask. Since there is only one waiter ever 2034 // waiting on a CompileTask, we know that no one else will 2035 // be using this CompileTask; we can free it. 2036 CompileTask::free(task); 2037 } 2038 } 2039 2040 /** 2041 * Initialize compiler thread(s) + compiler object(s). The postcondition 2042 * of this function is that the compiler runtimes are initialized and that 2043 * compiler threads can start compiling. 
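 * Returns false if initialization fails, in which case the calling compiler
 * thread exits (see compiler_thread_loop).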
2044 */ 2045 bool CompileBroker::init_compiler_runtime() { 2046 CompilerThread* thread = CompilerThread::current(); 2047 AbstractCompiler* comp = thread->compiler(); 2048 // Final sanity check - the compiler object must exist 2049 guarantee(comp != nullptr, "Compiler object must exist"); 2050 2051 { 2052 // Must switch to native to allocate ci_env 2053 ThreadToNativeFromVM ttn(thread); 2054 ciEnv ci_env((CompileTask*)nullptr); 2055 // Cache Jvmti state 2056 ci_env.cache_jvmti_state(); 2057 // Cache DTrace flags 2058 ci_env.cache_dtrace_flags(); 2059 2060 // Switch back to VM state to do compiler initialization 2061 ThreadInVMfromNative tv(thread); 2062 2063 // Perform per-thread and global initializations 2064 { 2065 MutexLocker only_one (thread, CompileThread_lock); 2066 SCCache::init_table(); 2067 } 2068 comp->initialize(); 2069 } 2070 2071 if (comp->is_failed()) { 2072 disable_compilation_forever(); 2073 // If compiler initialization failed, no compiler thread that is specific to a 2074 // particular compiler runtime will ever start to compile methods. 2075 shutdown_compiler_runtime(comp, thread); 2076 return false; 2077 } 2078 2079 // C1 specific check 2080 if (comp->is_c1() && (thread->get_buffer_blob() == nullptr)) { 2081 warning("Initialization of %s thread failed (no space to run compilers)", thread->name()); 2082 return false; 2083 } 2084 2085 return true; 2086 } 2087 2088 void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) { 2089 BufferBlob* blob = thread->get_buffer_blob(); 2090 if (blob != nullptr) { 2091 blob->purge(); 2092 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2093 CodeCache::free(blob); 2094 } 2095 } 2096 2097 /** 2098 * If C1 and/or C2 initialization failed, we shut down all compilation. 2099 * We do this to keep things simple. This can be changed if it ever turns 2100 * out to be a problem. 2101 */ 2102 void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) { 2103 free_buffer_blob_if_allocated(thread); 2104 2105 log_info(compilation)("shutdown_compiler_runtime: " INTPTR_FORMAT, p2i(thread)); 2106 2107 if (comp->should_perform_shutdown()) { 2108 // There are two reasons for shutting down the compiler 2109 // 1) compiler runtime initialization failed 2110 // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing 2111 warning("%s initialization failed. Shutting down all compilers", comp->name()); 2112 2113 // Only one thread per compiler runtime object enters here 2114 // Set state to shut down 2115 comp->set_shut_down(); 2116 2117 // Delete all queued compilation tasks to make compiler threads exit faster. 2118 if (_c1_compile_queue != nullptr) { 2119 _c1_compile_queue->free_all(); 2120 } 2121 2122 if (_c2_compile_queue != nullptr) { 2123 _c2_compile_queue->free_all(); 2124 } 2125 2126 if (_c3_compile_queue != nullptr) { 2127 _c3_compile_queue->free_all(); 2128 } 2129 2130 // Set flags so that we continue execution with using interpreter only. 2131 UseCompiler = false; 2132 UseInterpreter = true; 2133 2134 // We could delete compiler runtimes also. However, there are references to 2135 // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which then 2136 // fail. This can be done later if necessary. 2137 } 2138 } 2139 2140 /** 2141 * Helper function to create new or reuse old CompileLog. 
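 * Returns nullptr when LogCompilation is disabled.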
2142 */ 2143 CompileLog* CompileBroker::get_log(CompilerThread* ct) { 2144 if (!LogCompilation) return nullptr; 2145 2146 AbstractCompiler *compiler = ct->compiler(); 2147 bool jvmci = JVMCI_ONLY( compiler->is_jvmci() ||) false; 2148 bool c1 = compiler->is_c1(); 2149 jobject* compiler_objects = c1 ? _compiler1_objects : (_c3_count == 0 ? _compiler2_objects : (jvmci ? _compiler2_objects : _compiler3_objects)); 2150 assert(compiler_objects != nullptr, "must be initialized at this point"); 2151 CompileLog** logs = c1 ? _compiler1_logs : (_c3_count == 0 ? _compiler2_logs : (jvmci ? _compiler2_logs : _compiler3_logs)); 2152 assert(logs != nullptr, "must be initialized at this point"); 2153 int count = c1 ? _c1_count : (_c3_count == 0 ? _c2_count : (jvmci ? _c2_count : _c3_count)); 2154 2155 if (ct->queue() == _sc1_compile_queue || ct->queue() == _sc2_compile_queue) { 2156 compiler_objects = _sc_objects; 2157 logs = _sc_logs; 2158 count = _sc_count; 2159 } 2160 // Find Compiler number by its threadObj. 2161 oop compiler_obj = ct->threadObj(); 2162 int compiler_number = 0; 2163 bool found = false; 2164 for (; compiler_number < count; compiler_number++) { 2165 if (JNIHandles::resolve_non_null(compiler_objects[compiler_number]) == compiler_obj) { 2166 found = true; 2167 break; 2168 } 2169 } 2170 assert(found, "Compiler must exist at this point"); 2171 2172 // Determine pointer for this thread's log. 2173 CompileLog** log_ptr = &logs[compiler_number]; 2174 2175 // Return old one if it exists. 2176 CompileLog* log = *log_ptr; 2177 if (log != nullptr) { 2178 ct->init_log(log); 2179 return log; 2180 } 2181 2182 // Create a new one and remember it. 2183 init_compiler_thread_log(); 2184 log = ct->log(); 2185 *log_ptr = log; 2186 return log; 2187 } 2188 2189 // ------------------------------------------------------------------ 2190 // CompileBroker::compiler_thread_loop 2191 // 2192 // The main loop run by a CompilerThread. 2193 void CompileBroker::compiler_thread_loop() { 2194 CompilerThread* thread = CompilerThread::current(); 2195 CompileQueue* queue = thread->queue(); 2196 // For the thread that initializes the ciObjectFactory 2197 // this resource mark holds all the shared objects 2198 ResourceMark rm; 2199 2200 // First thread to get here will initialize the compiler interface 2201 2202 { 2203 ASSERT_IN_VM; 2204 MutexLocker only_one (thread, CompileThread_lock); 2205 if (!ciObjectFactory::is_initialized()) { 2206 ciObjectFactory::initialize(); 2207 } 2208 } 2209 2210 // Open a log. 2211 CompileLog* log = get_log(thread); 2212 if (log != nullptr) { 2213 log->begin_elem("start_compile_thread name='%s' thread='" UINTX_FORMAT "' process='%d'", 2214 thread->name(), 2215 os::current_thread_id(), 2216 os::current_process_id()); 2217 log->stamp(); 2218 log->end_elem(); 2219 } 2220 2221 // If compiler thread/runtime initialization fails, exit the compiler thread 2222 if (!init_compiler_runtime()) { 2223 return; 2224 } 2225 2226 thread->start_idle_timer(); 2227 2228 // Poll for new compilation tasks as long as the JVM runs. Compilation 2229 // should only be disabled if something went wrong while initializing the 2230 // compiler runtimes. This, in turn, should not happen. The only known case 2231 // when compiler runtime initialization fails is if there is not enough free 2232 // space in the code cache to generate the necessary stubs, etc. 2233 while (!is_compilation_disabled_forever()) { 2234 // We need this HandleMark to avoid leaking VM handles. 
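    // Each iteration handles at most one task; if get() returns no task, an
    // idle thread may remove itself below when UseDynamicNumberOfCompilerThreads
    // is enabled.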
2235 HandleMark hm(thread); 2236 2237 RecompilationPolicy::recompilation_step(RecompilationWorkUnitSize, thread); 2238 2239 CompileTask* task = queue->get(thread); 2240 2241 if (task == nullptr) { 2242 if (UseDynamicNumberOfCompilerThreads) { 2243 // Access compiler_count under lock to enforce consistency. 2244 MutexLocker only_one(CompileThread_lock); 2245 if (can_remove(thread, true)) { 2246 if (trace_compiler_threads()) { 2247 ResourceMark rm; 2248 stringStream msg; 2249 msg.print("Removing compiler thread %s after " JLONG_FORMAT " ms idle time", 2250 thread->name(), thread->idle_time_millis()); 2251 print_compiler_threads(msg); 2252 } 2253 2254 // Notify compiler that the compiler thread is about to stop 2255 thread->compiler()->stopping_compiler_thread(thread); 2256 2257 free_buffer_blob_if_allocated(thread); 2258 return; // Stop this thread. 2259 } 2260 } 2261 } else { 2262 // Assign the task to the current thread. Mark this compilation 2263 // thread as active for the profiler. 2264 // CompileTaskWrapper also keeps the Method* from being deallocated if redefinition 2265 // occurs after fetching the compile task off the queue. 2266 CompileTaskWrapper ctw(task); 2267 methodHandle method(thread, task->method()); 2268 2269 // Never compile a method if breakpoints are present in it 2270 if (method()->number_of_breakpoints() == 0) { 2271 // Compile the method. 2272 if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) { 2273 invoke_compiler_on_method(task); 2274 thread->start_idle_timer(); 2275 } else { 2276 // After compilation is disabled, remove remaining methods from queue 2277 method->clear_queued_for_compilation(); 2278 method->set_pending_queue_processed(false); 2279 task->set_failure_reason("compilation is disabled"); 2280 } 2281 } else { 2282 task->set_failure_reason("breakpoints are present"); 2283 } 2284 2285 if (UseDynamicNumberOfCompilerThreads) { 2286 possibly_add_compiler_threads(thread); 2287 assert(!thread->has_pending_exception(), "should have been handled"); 2288 } 2289 } 2290 } 2291 2292 // Shut down compiler runtime 2293 shutdown_compiler_runtime(thread->compiler(), thread); 2294 } 2295 2296 // ------------------------------------------------------------------ 2297 // CompileBroker::init_compiler_thread_log 2298 // 2299 // Set up state required by +LogCompilation. 2300 void CompileBroker::init_compiler_thread_log() { 2301 CompilerThread* thread = CompilerThread::current(); 2302 char file_name[4*K]; 2303 FILE* fp = nullptr; 2304 intx thread_id = os::current_thread_id(); 2305 for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) { 2306 const char* dir = (try_temp_dir ? 
os::get_temp_directory() : nullptr); 2307 if (dir == nullptr) { 2308 jio_snprintf(file_name, sizeof(file_name), "hs_c" UINTX_FORMAT "_pid%u.log", 2309 thread_id, os::current_process_id()); 2310 } else { 2311 jio_snprintf(file_name, sizeof(file_name), 2312 "%s%shs_c" UINTX_FORMAT "_pid%u.log", dir, 2313 os::file_separator(), thread_id, os::current_process_id()); 2314 } 2315 2316 fp = os::fopen(file_name, "wt"); 2317 if (fp != nullptr) { 2318 if (LogCompilation && Verbose) { 2319 tty->print_cr("Opening compilation log %s", file_name); 2320 } 2321 CompileLog* log = new(mtCompiler) CompileLog(file_name, fp, thread_id); 2322 if (log == nullptr) { 2323 fclose(fp); 2324 return; 2325 } 2326 thread->init_log(log); 2327 2328 if (xtty != nullptr) { 2329 ttyLocker ttyl; 2330 // Record any per thread log files 2331 xtty->elem("thread_logfile thread='" INTX_FORMAT "' filename='%s'", thread_id, file_name); 2332 } 2333 return; 2334 } 2335 } 2336 warning("Cannot open log file: %s", file_name); 2337 } 2338 2339 void CompileBroker::log_metaspace_failure() { 2340 const char* message = "some methods may not be compiled because metaspace " 2341 "is out of memory"; 2342 if (CompilationLog::log() != nullptr) { 2343 CompilationLog::log()->log_metaspace_failure(message); 2344 } 2345 if (PrintCompilation) { 2346 tty->print_cr("COMPILE PROFILING SKIPPED: %s", message); 2347 } 2348 } 2349 2350 2351 // ------------------------------------------------------------------ 2352 // CompileBroker::set_should_block 2353 // 2354 // Set _should_block. 2355 // Call this from the VM, with Threads_lock held and a safepoint requested. 2356 void CompileBroker::set_should_block() { 2357 assert(Threads_lock->owner() == Thread::current(), "must have threads lock"); 2358 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint already"); 2359 #ifndef PRODUCT 2360 if (PrintCompilation && (Verbose || WizardMode)) 2361 tty->print_cr("notifying compiler thread pool to block"); 2362 #endif 2363 _should_block = true; 2364 } 2365 2366 // ------------------------------------------------------------------ 2367 // CompileBroker::maybe_block 2368 // 2369 // Call this from the compiler at convenient points, to poll for _should_block. 
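// The ThreadInVMfromNative transition below is what does the blocking: the
// native-to-VM transition takes part in the safepoint protocol, so the
// compiler thread halts here until the requested safepoint has ended.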
2370 void CompileBroker::maybe_block() { 2371 if (_should_block) { 2372 #ifndef PRODUCT 2373 if (PrintCompilation && (Verbose || WizardMode)) 2374 tty->print_cr("compiler thread " INTPTR_FORMAT " poll detects block request", p2i(Thread::current())); 2375 #endif 2376 ThreadInVMfromNative tivfn(JavaThread::current()); 2377 } 2378 } 2379 2380 // wrapper for CodeCache::print_summary() 2381 static void codecache_print(bool detailed) 2382 { 2383 stringStream s; 2384 // Dump code cache into a buffer before locking the tty, 2385 { 2386 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2387 CodeCache::print_summary(&s, detailed); 2388 } 2389 ttyLocker ttyl; 2390 tty->print("%s", s.freeze()); 2391 } 2392 2393 // wrapper for CodeCache::print_summary() using outputStream 2394 static void codecache_print(outputStream* out, bool detailed) { 2395 stringStream s; 2396 2397 // Dump code cache into a buffer 2398 { 2399 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 2400 CodeCache::print_summary(&s, detailed); 2401 } 2402 2403 char* remaining_log = s.as_string(); 2404 while (*remaining_log != '\0') { 2405 char* eol = strchr(remaining_log, '\n'); 2406 if (eol == nullptr) { 2407 out->print_cr("%s", remaining_log); 2408 remaining_log = remaining_log + strlen(remaining_log); 2409 } else { 2410 *eol = '\0'; 2411 out->print_cr("%s", remaining_log); 2412 remaining_log = eol + 1; 2413 } 2414 } 2415 } 2416 2417 void CompileBroker::handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env, 2418 int compilable, const char* failure_reason) { 2419 if (!AbortVMOnCompilationFailure) { 2420 return; 2421 } 2422 if (compilable == ciEnv::MethodCompilable_not_at_tier) { 2423 fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason); 2424 } 2425 if (compilable == ciEnv::MethodCompilable_never) { 2426 fatal("Never compilable: %s", failure_reason); 2427 } 2428 } 2429 2430 static void post_compilation_event(EventCompilation& event, CompileTask* task) { 2431 assert(task != nullptr, "invariant"); 2432 CompilerEvent::CompilationEvent::post(event, 2433 task->compile_id(), 2434 task->compiler()->type(), 2435 task->method(), 2436 task->comp_level(), 2437 task->is_success(), 2438 task->osr_bci() != CompileBroker::standard_entry_bci, 2439 task->nm_total_size(), 2440 task->num_inlined_bytecodes(), 2441 task->arena_bytes()); 2442 } 2443 2444 int DirectivesStack::_depth = 0; 2445 CompilerDirectives* DirectivesStack::_top = nullptr; 2446 CompilerDirectives* DirectivesStack::_bottom = nullptr; 2447 2448 // Acquires Compilation_lock and waits for it to be notified 2449 // as long as WhiteBox::compilation_locked is true. 2450 static void whitebox_lock_compilation() { 2451 MonitorLocker locker(Compilation_lock, Mutex::_no_safepoint_check_flag); 2452 while (WhiteBox::compilation_locked) { 2453 locker.wait(); 2454 } 2455 } 2456 2457 // ------------------------------------------------------------------ 2458 // CompileBroker::invoke_compiler_on_method 2459 // 2460 // Compile a method. 
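// Runs on a CompilerThread. Dispatches either to the JVMCI path or to the
// C1/C2 path below, records any failure reason on the task, and updates
// compilation statistics when done.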
2461 // 2462 void CompileBroker::invoke_compiler_on_method(CompileTask* task) { 2463 task->print_ul(); 2464 elapsedTimer time; 2465 2466 DirectiveSet* directive = task->directive(); 2467 if (directive->PrintCompilationOption) { 2468 ResourceMark rm; 2469 task->print_tty(); 2470 } 2471 2472 CompilerThread* thread = CompilerThread::current(); 2473 ResourceMark rm(thread); 2474 2475 if (CompilationLog::log() != nullptr) { 2476 CompilationLog::log()->log_compile(thread, task); 2477 } 2478 2479 // Common flags. 2480 int compile_id = task->compile_id(); 2481 int osr_bci = task->osr_bci(); 2482 bool is_osr = (osr_bci != standard_entry_bci); 2483 bool should_log = (thread->log() != nullptr); 2484 bool should_break = false; 2485 const int task_level = task->comp_level(); 2486 AbstractCompiler* comp = task->compiler(); 2487 CompileTrainingData* tdata = task->training_data(); 2488 assert(tdata == nullptr || TrainingData::need_data() || 2489 CDSConfig::is_dumping_preimage_static_archive(), ""); // FIXME: MetaspaceShared::preload_and_dump() messes with RecordTraining flag 2490 { 2491 // create the handle inside it's own block so it can't 2492 // accidentally be referenced once the thread transitions to 2493 // native. The NoHandleMark before the transition should catch 2494 // any cases where this occurs in the future. 2495 methodHandle method(thread, task->method()); 2496 2497 assert(!method->is_native(), "no longer compile natives"); 2498 2499 // Update compile information when using perfdata. 2500 if (UsePerfData) { 2501 update_compile_perf_data(thread, method, is_osr); 2502 } 2503 2504 DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level)); 2505 } 2506 2507 if (tdata != nullptr) { 2508 tdata->record_compilation_start(task); 2509 } 2510 2511 should_break = directive->BreakAtCompileOption || task->check_break_at_flags(); 2512 if (should_log && !directive->LogOption) { 2513 should_log = false; 2514 } 2515 2516 // Allocate a new set of JNI handles. 2517 JNIHandleMark jhm(thread); 2518 Method* target_handle = task->method(); 2519 int compilable = ciEnv::MethodCompilable; 2520 const char* failure_reason = nullptr; 2521 bool failure_reason_on_C_heap = false; 2522 const char* retry_message = nullptr; 2523 2524 #if INCLUDE_JVMCI 2525 if (UseJVMCICompiler && comp != nullptr && comp->is_jvmci()) { 2526 JVMCICompiler* jvmci = (JVMCICompiler*) comp; 2527 2528 TraceTime t1("compilation", &time); 2529 EventCompilation event; 2530 JVMCICompileState compile_state(task, jvmci); 2531 JVMCIRuntime *runtime = nullptr; 2532 2533 if (JVMCI::in_shutdown()) { 2534 failure_reason = "in JVMCI shutdown"; 2535 retry_message = "not retryable"; 2536 compilable = ciEnv::MethodCompilable_never; 2537 } else if (compile_state.target_method_is_old()) { 2538 // Skip redefined methods 2539 failure_reason = "redefined method"; 2540 retry_message = "not retryable"; 2541 compilable = ciEnv::MethodCompilable_never; 2542 } else { 2543 JVMCIEnv env(thread, &compile_state, __FILE__, __LINE__); 2544 if (env.init_error() != JNI_OK) { 2545 const char* msg = env.init_error_msg(); 2546 failure_reason = os::strdup(err_msg("Error attaching to libjvmci (err: %d, %s)", 2547 env.init_error(), msg == nullptr ? "unknown" : msg), mtJVMCI); 2548 bool reason_on_C_heap = true; 2549 // In case of JNI_ENOMEM, there's a good chance a subsequent attempt to create libjvmci or attach to it 2550 // might succeed. Other errors most likely indicate a non-recoverable error in the JVMCI runtime. 
2551 bool retryable = env.init_error() == JNI_ENOMEM; 2552 compile_state.set_failure(retryable, failure_reason, reason_on_C_heap); 2553 } 2554 if (failure_reason == nullptr) { 2555 if (WhiteBoxAPI && WhiteBox::compilation_locked) { 2556 // Must switch to native to block 2557 ThreadToNativeFromVM ttn(thread); 2558 whitebox_lock_compilation(); 2559 } 2560 methodHandle method(thread, target_handle); 2561 runtime = env.runtime(); 2562 runtime->compile_method(&env, jvmci, method, osr_bci); 2563 2564 failure_reason = compile_state.failure_reason(); 2565 failure_reason_on_C_heap = compile_state.failure_reason_on_C_heap(); 2566 if (!compile_state.retryable()) { 2567 retry_message = "not retryable"; 2568 compilable = ciEnv::MethodCompilable_not_at_tier; 2569 } 2570 if (!task->is_success()) { 2571 assert(failure_reason != nullptr, "must specify failure_reason"); 2572 } 2573 } 2574 } 2575 if (!task->is_success() && !JVMCI::in_shutdown()) { 2576 handle_compile_error(thread, task, nullptr, compilable, failure_reason); 2577 } 2578 if (event.should_commit()) { 2579 post_compilation_event(event, task); 2580 } 2581 2582 if (runtime != nullptr) { 2583 runtime->post_compile(thread); 2584 } 2585 } else 2586 #endif // INCLUDE_JVMCI 2587 { 2588 NoHandleMark nhm; 2589 ThreadToNativeFromVM ttn(thread); 2590 2591 ciEnv ci_env(task); 2592 if (should_break) { 2593 ci_env.set_break_at_compile(true); 2594 } 2595 if (should_log) { 2596 ci_env.set_log(thread->log()); 2597 } 2598 assert(thread->env() == &ci_env, "set by ci_env"); 2599 // The thread-env() field is cleared in ~CompileTaskWrapper. 2600 2601 // Cache Jvmti state 2602 bool method_is_old = ci_env.cache_jvmti_state(); 2603 2604 // Skip redefined methods 2605 if (method_is_old) { 2606 ci_env.record_method_not_compilable("redefined method", true); 2607 } 2608 2609 // Cache DTrace flags 2610 ci_env.cache_dtrace_flags(); 2611 2612 ciMethod* target = ci_env.get_method_from_handle(target_handle); 2613 2614 TraceTime t1("compilation", &time); 2615 EventCompilation event; 2616 2617 bool install_code = true; 2618 if (comp == nullptr) { 2619 ci_env.record_method_not_compilable("no compiler"); 2620 } else if (!ci_env.failing()) { 2621 if (WhiteBoxAPI && WhiteBox::compilation_locked) { 2622 whitebox_lock_compilation(); 2623 } 2624 if (StoreCachedCode && task->is_precompiled()) { 2625 install_code = false; // not suitable in the current context 2626 } 2627 comp->compile_method(&ci_env, target, osr_bci, install_code, directive); 2628 2629 /* Repeat compilation without installing code for profiling purposes */ 2630 int repeat_compilation_count = directive->RepeatCompilationOption; 2631 while (repeat_compilation_count > 0) { 2632 ResourceMark rm(thread); 2633 task->print_ul("NO CODE INSTALLED"); 2634 comp->compile_method(&ci_env, target, osr_bci, false, directive); 2635 repeat_compilation_count--; 2636 } 2637 } 2638 2639 DirectivesStack::release(directive); 2640 2641 if (!ci_env.failing() && !task->is_success() && install_code) { 2642 assert(ci_env.failure_reason() != nullptr, "expect failure reason"); 2643 assert(false, "compiler should always document failure: %s", ci_env.failure_reason()); 2644 // The compiler elected, without comment, not to register a result. 2645 // Do not attempt further compilations of this method. 
2646 ci_env.record_method_not_compilable("compile failed"); 2647 } 2648 2649 // Copy this bit to the enclosing block: 2650 compilable = ci_env.compilable(); 2651 2652 if (ci_env.failing()) { 2653 // Duplicate the failure reason string, so that it outlives ciEnv 2654 failure_reason = os::strdup(ci_env.failure_reason(), mtCompiler); 2655 failure_reason_on_C_heap = true; 2656 retry_message = ci_env.retry_message(); 2657 ci_env.report_failure(failure_reason); 2658 } 2659 2660 if (ci_env.failing()) { 2661 handle_compile_error(thread, task, &ci_env, compilable, failure_reason); 2662 } 2663 if (event.should_commit()) { 2664 post_compilation_event(event, task); 2665 } 2666 } 2667 2668 if (failure_reason != nullptr) { 2669 task->set_failure_reason(failure_reason, failure_reason_on_C_heap); 2670 if (CompilationLog::log() != nullptr) { 2671 CompilationLog::log()->log_failure(thread, task, failure_reason, retry_message); 2672 } 2673 if (PrintCompilation) { 2674 FormatBufferResource msg = retry_message != nullptr ? 2675 FormatBufferResource("COMPILE SKIPPED: %s (%s)", failure_reason, retry_message) : 2676 FormatBufferResource("COMPILE SKIPPED: %s", failure_reason); 2677 task->print(tty, msg); 2678 } 2679 } 2680 2681 task->mark_finished(os::elapsed_counter()); 2682 2683 if (tdata != nullptr) { 2684 tdata->record_compilation_end(task); 2685 } 2686 2687 methodHandle method(thread, task->method()); 2688 2689 DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success()); 2690 2691 collect_statistics(thread, time, task); 2692 2693 if (PrintCompilation && PrintCompilation2) { 2694 tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp 2695 tty->print("%4d ", compile_id); // print compilation number 2696 tty->print("%s ", (is_osr ? "%" : (task->is_scc() ? "A" : " "))); 2697 if (task->is_success()) { 2698 tty->print("size: %d(%d) ", task->nm_total_size(), task->nm_insts_size()); 2699 } 2700 tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes()); 2701 } 2702 2703 Log(compilation, codecache) log; 2704 if (log.is_debug()) { 2705 LogStream ls(log.debug()); 2706 codecache_print(&ls, /* detailed= */ false); 2707 } 2708 if (PrintCodeCacheOnCompilation) { 2709 codecache_print(/* detailed= */ false); 2710 } 2711 // Disable compilation, if required. 2712 switch (compilable) { 2713 case ciEnv::MethodCompilable_never: 2714 if (is_osr) 2715 method->set_not_osr_compilable_quietly("MethodCompilable_never"); 2716 else 2717 method->set_not_compilable_quietly("MethodCompilable_never"); 2718 break; 2719 case ciEnv::MethodCompilable_not_at_tier: 2720 if (is_osr) 2721 method->set_not_osr_compilable_quietly("MethodCompilable_not_at_tier", task_level); 2722 else 2723 method->set_not_compilable_quietly("MethodCompilable_not_at_tier", task_level); 2724 break; 2725 } 2726 2727 // Note that the queued_for_compilation bits are cleared without 2728 // protection of a mutex. [They were set by the requester thread, 2729 // when adding the task to the compile queue -- at which time the 2730 // compile queue lock was held. Subsequently, we acquired the compile 2731 // queue lock to get this task off the compile queue; thus (to belabour 2732 // the point somewhat) our clearing of the bits must be occurring 2733 // only after the setting of the bits. See also 14012000 above. 
2734 method->clear_queued_for_compilation(); 2735 method->set_pending_queue_processed(false); 2736 2737 if (PrintCompilation) { 2738 ResourceMark rm; 2739 task->print_tty(); 2740 } 2741 } 2742 2743 /** 2744 * The CodeCache is full. Print warning and disable compilation. 2745 * Schedule code cache cleaning so compilation can continue later. 2746 * This function needs to be called only from CodeCache::allocate(), 2747 * since we currently handle a full code cache uniformly. 2748 */ 2749 void CompileBroker::handle_full_code_cache(CodeBlobType code_blob_type) { 2750 UseInterpreter = true; 2751 if (UseCompiler || AlwaysCompileLoopMethods ) { 2752 if (xtty != nullptr) { 2753 stringStream s; 2754 // Dump code cache state into a buffer before locking the tty, 2755 // because log_state() will use locks causing lock conflicts. 2756 CodeCache::log_state(&s); 2757 // Lock to prevent tearing 2758 ttyLocker ttyl; 2759 xtty->begin_elem("code_cache_full"); 2760 xtty->print("%s", s.freeze()); 2761 xtty->stamp(); 2762 xtty->end_elem(); 2763 } 2764 2765 #ifndef PRODUCT 2766 if (ExitOnFullCodeCache) { 2767 codecache_print(/* detailed= */ true); 2768 before_exit(JavaThread::current()); 2769 exit_globals(); // will delete tty 2770 vm_direct_exit(1); 2771 } 2772 #endif 2773 if (UseCodeCacheFlushing) { 2774 // Since code cache is full, immediately stop new compiles 2775 if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) { 2776 log_info(codecache)("Code cache is full - disabling compilation"); 2777 } 2778 } else { 2779 disable_compilation_forever(); 2780 } 2781 2782 CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning()); 2783 } 2784 } 2785 2786 // ------------------------------------------------------------------ 2787 // CompileBroker::update_compile_perf_data 2788 // 2789 // Record this compilation for debugging purposes. 2790 void CompileBroker::update_compile_perf_data(CompilerThread* thread, const methodHandle& method, bool is_osr) { 2791 ResourceMark rm; 2792 char* method_name = method->name()->as_C_string(); 2793 char current_method[CompilerCounters::cmname_buffer_length]; 2794 size_t maxLen = CompilerCounters::cmname_buffer_length; 2795 2796 const char* class_name = method->method_holder()->name()->as_C_string(); 2797 2798 size_t s1len = strlen(class_name); 2799 size_t s2len = strlen(method_name); 2800 2801 // check if we need to truncate the string 2802 if (s1len + s2len + 2 > maxLen) { 2803 2804 // the strategy is to lop off the leading characters of the 2805 // class name and the trailing characters of the method name. 2806 2807 if (s2len + 2 > maxLen) { 2808 // lop of the entire class name string, let snprintf handle 2809 // truncation of the method name. 
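      // The "+ 2" in the length check above accounts for the separating space
      // and the terminating NUL written by jio_snprintf below.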
2810 class_name += s1len; // null string 2811 } 2812 else { 2813 // lop off the extra characters from the front of the class name 2814 class_name += ((s1len + s2len + 2) - maxLen); 2815 } 2816 } 2817 2818 jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name); 2819 2820 int last_compile_type = normal_compile; 2821 if (CICountOSR && is_osr) { 2822 last_compile_type = osr_compile; 2823 } else if (CICountNative && method->is_native()) { 2824 last_compile_type = native_compile; 2825 } 2826 2827 CompilerCounters* counters = thread->counters(); 2828 counters->set_current_method(current_method); 2829 counters->set_compile_type((jlong) last_compile_type); 2830 } 2831 2832 // ------------------------------------------------------------------ 2833 // CompileBroker::collect_statistics 2834 // 2835 // Collect statistics about the compilation. 2836 2837 void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task) { 2838 bool success = task->is_success(); 2839 methodHandle method (thread, task->method()); 2840 int compile_id = task->compile_id(); 2841 bool is_osr = (task->osr_bci() != standard_entry_bci); 2842 const int comp_level = task->comp_level(); 2843 CompilerCounters* counters = thread->counters(); 2844 2845 MutexLocker locker(CompileStatistics_lock); 2846 2847 // _perf variables are production performance counters which are 2848 // updated regardless of the setting of the CITime and CITimeEach flags 2849 // 2850 2851 // account all time, including bailouts and failures in this counter; 2852 // C1 and C2 counters are counting both successful and unsuccessful compiles 2853 _t_total_compilation.add(&time); 2854 2855 if (!success) { 2856 _total_bailout_count++; 2857 if (UsePerfData) { 2858 _perf_last_failed_method->set_value(counters->current_method()); 2859 _perf_last_failed_type->set_value(counters->compile_type()); 2860 _perf_total_bailout_count->inc(); 2861 } 2862 _t_bailedout_compilation.add(&time); 2863 2864 if (CITime || log_is_enabled(Info, init)) { 2865 CompilerStatistics* stats = nullptr; 2866 if (task->is_scc()) { 2867 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1); 2868 stats = &_scc_stats_per_level[level]; 2869 } else { 2870 stats = &_stats_per_level[comp_level-1]; 2871 } 2872 stats->_bailout.update(time, 0); 2873 } 2874 } else if (!task->is_success()) { 2875 if (UsePerfData) { 2876 _perf_last_invalidated_method->set_value(counters->current_method()); 2877 _perf_last_invalidated_type->set_value(counters->compile_type()); 2878 _perf_total_invalidated_count->inc(); 2879 } 2880 _total_invalidated_count++; 2881 _t_invalidated_compilation.add(&time); 2882 2883 if (CITime || log_is_enabled(Info, init)) { 2884 CompilerStatistics* stats = nullptr; 2885 if (task->is_scc()) { 2886 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1); 2887 stats = &_scc_stats_per_level[level]; 2888 } else { 2889 stats = &_stats_per_level[comp_level-1]; 2890 } 2891 stats->_invalidated.update(time, 0); 2892 } 2893 } else { 2894 // Compilation succeeded 2895 2896 // update compilation ticks - used by the implementation of 2897 // java.lang.management.CompilationMXBean 2898 _perf_total_compilation->inc(time.ticks()); 2899 _peak_compilation_time = time.milliseconds() > _peak_compilation_time ? 
time.milliseconds() : _peak_compilation_time; 2900 2901 if (CITime || log_is_enabled(Info, init)) { 2902 int bytes_compiled = method->code_size() + task->num_inlined_bytecodes(); 2903 if (is_osr) { 2904 _t_osr_compilation.add(&time); 2905 _sum_osr_bytes_compiled += bytes_compiled; 2906 } else { 2907 _t_standard_compilation.add(&time); 2908 _sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes(); 2909 } 2910 2911 // Collect statistic per compilation level 2912 if (task->is_scc()) { 2913 _scc_stats._standard.update(time, bytes_compiled); 2914 _scc_stats._nmethods_size += task->nm_total_size(); 2915 _scc_stats._nmethods_code_size += task->nm_insts_size(); 2916 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1); 2917 CompilerStatistics* stats = &_scc_stats_per_level[level]; 2918 stats->_standard.update(time, bytes_compiled); 2919 stats->_nmethods_size += task->nm_total_size(); 2920 stats->_nmethods_code_size += task->nm_insts_size(); 2921 } else if (comp_level > CompLevel_none && comp_level <= CompLevel_full_optimization) { 2922 CompilerStatistics* stats = &_stats_per_level[comp_level-1]; 2923 if (is_osr) { 2924 stats->_osr.update(time, bytes_compiled); 2925 } else { 2926 stats->_standard.update(time, bytes_compiled); 2927 } 2928 stats->_nmethods_size += task->nm_total_size(); 2929 stats->_nmethods_code_size += task->nm_insts_size(); 2930 } else { 2931 assert(false, "CompilerStatistics object does not exist for compilation level %d", comp_level); 2932 } 2933 2934 // Collect statistic per compiler 2935 AbstractCompiler* comp = task->compiler(); 2936 if (comp && !task->is_scc()) { 2937 CompilerStatistics* stats = comp->stats(); 2938 if (is_osr) { 2939 stats->_osr.update(time, bytes_compiled); 2940 } else { 2941 stats->_standard.update(time, bytes_compiled); 2942 } 2943 stats->_nmethods_size += task->nm_total_size(); 2944 stats->_nmethods_code_size += task->nm_insts_size(); 2945 } else if (!task->is_scc()) { // if (!comp) 2946 assert(false, "Compiler object must exist"); 2947 } 2948 } 2949 2950 if (UsePerfData) { 2951 // save the name of the last method compiled 2952 _perf_last_method->set_value(counters->current_method()); 2953 _perf_last_compile_type->set_value(counters->compile_type()); 2954 _perf_last_compile_size->set_value(method->code_size() + 2955 task->num_inlined_bytecodes()); 2956 if (is_osr) { 2957 _perf_osr_compilation->inc(time.ticks()); 2958 _perf_sum_osr_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes()); 2959 } else { 2960 _perf_standard_compilation->inc(time.ticks()); 2961 _perf_sum_standard_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes()); 2962 } 2963 } 2964 2965 if (CITimeEach) { 2966 double compile_time = time.seconds(); 2967 double bytes_per_sec = compile_time == 0.0 ? 
0.0 : (double)(method->code_size() + task->num_inlined_bytecodes()) / compile_time; 2968 tty->print_cr("%3d seconds: %6.3f bytes/sec : %f (bytes %d + %d inlined)", 2969 compile_id, compile_time, bytes_per_sec, method->code_size(), task->num_inlined_bytecodes()); 2970 } 2971 2972 // Collect counts of successful compilations 2973 _sum_nmethod_size += task->nm_total_size(); 2974 _sum_nmethod_code_size += task->nm_insts_size(); 2975 _total_compile_count++; 2976 2977 if (UsePerfData) { 2978 _perf_sum_nmethod_size->inc( task->nm_total_size()); 2979 _perf_sum_nmethod_code_size->inc(task->nm_insts_size()); 2980 _perf_total_compile_count->inc(); 2981 } 2982 2983 if (is_osr) { 2984 if (UsePerfData) _perf_total_osr_compile_count->inc(); 2985 _total_osr_compile_count++; 2986 } else { 2987 if (UsePerfData) _perf_total_standard_compile_count->inc(); 2988 _total_standard_compile_count++; 2989 } 2990 } 2991 // set the current method for the thread to null 2992 if (UsePerfData) counters->set_current_method(""); 2993 } 2994 2995 const char* CompileBroker::compiler_name(int comp_level) { 2996 AbstractCompiler *comp = CompileBroker::compiler(comp_level); 2997 if (comp == nullptr) { 2998 return "no compiler"; 2999 } else { 3000 return (comp->name()); 3001 } 3002 } 3003 3004 jlong CompileBroker::total_compilation_ticks() { 3005 return _perf_total_compilation != nullptr ? _perf_total_compilation->get_value() : 0; 3006 } 3007 3008 void CompileBroker::log_not_entrant(nmethod* nm) { 3009 _total_not_entrant_count++; 3010 if (CITime || log_is_enabled(Info, init)) { 3011 CompilerStatistics* stats = nullptr; 3012 int level = nm->comp_level(); 3013 if (nm->is_scc()) { 3014 if (nm->preloaded()) { 3015 assert(level == CompLevel_full_optimization, "%d", level); 3016 level = CompLevel_full_optimization + 1; 3017 } 3018 stats = &_scc_stats_per_level[level - 1]; 3019 } else { 3020 stats = &_stats_per_level[level - 1]; 3021 } 3022 stats->_made_not_entrant._count++; 3023 } 3024 } 3025 3026 void CompileBroker::print_times(const char* name, CompilerStatistics* stats) { 3027 tty->print_cr(" %s {speed: %6.3f bytes/s; standard: %6.3f s, %u bytes, %u methods; osr: %6.3f s, %u bytes, %u methods; nmethods_size: %u bytes; nmethods_code_size: %u bytes}", 3028 name, stats->bytes_per_second(), 3029 stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count, 3030 stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count, 3031 stats->_nmethods_size, stats->_nmethods_code_size); 3032 } 3033 3034 static void print_helper(outputStream* st, const char* name, CompilerStatistics::Data data, bool print_time = true) { 3035 if (data._count > 0) { 3036 st->print("; %s: %4u methods", name, data._count); 3037 if (print_time) { 3038 st->print(" (in %.3fs)", data._time.seconds()); 3039 } 3040 } 3041 } 3042 3043 static void print_tier_helper(outputStream* st, const char* prefix, int tier, CompilerStatistics* stats) { 3044 st->print(" %s%d: %5u methods", prefix, tier, stats->_standard._count); 3045 if (stats->_standard._count > 0) { 3046 st->print(" (in %.3fs)", stats->_standard._time.seconds()); 3047 } 3048 print_helper(st, "osr", stats->_osr); 3049 print_helper(st, "bailout", stats->_bailout); 3050 print_helper(st, "invalid", stats->_invalidated); 3051 print_helper(st, "not_entrant", stats->_made_not_entrant, false); 3052 st->cr(); 3053 } 3054 3055 static void print_queue_info(outputStream* st, CompileQueue* queue) { 3056 if (queue != nullptr) { 3057 MutexLocker ml(queue->lock()); 3058 3059 uint total_cnt = 0; 3060 uint 
active_cnt = 0; 3061 for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) { 3062 guarantee(jt != nullptr, ""); 3063 if (jt->is_Compiler_thread()) { 3064 CompilerThread* ct = (CompilerThread*)jt; 3065 3066 guarantee(ct != nullptr, ""); 3067 if (ct->queue() == queue) { 3068 ++total_cnt; 3069 CompileTask* task = ct->task(); 3070 if (task != nullptr) { 3071 ++active_cnt; 3072 } 3073 } 3074 } 3075 } 3076 3077 st->print(" %s (%d active / %d total threads): %u tasks", 3078 queue->name(), active_cnt, total_cnt, queue->size()); 3079 if (queue->size() > 0) { 3080 uint counts[] = {0, 0, 0, 0, 0}; // T1 ... T5 3081 for (CompileTask* task = queue->first(); task != nullptr; task = task->next()) { 3082 int tier = task->comp_level(); 3083 if (task->is_scc() && task->preload()) { 3084 assert(tier == CompLevel_full_optimization, "%d", tier); 3085 tier = CompLevel_full_optimization + 1; 3086 } 3087 counts[tier-1]++; 3088 } 3089 st->print(":"); 3090 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) { 3091 uint cnt = counts[tier-1]; 3092 if (cnt > 0) { 3093 st->print(" T%d: %u tasks;", tier, cnt); 3094 } 3095 } 3096 } 3097 st->cr(); 3098 3099 // for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) { 3100 // guarantee(jt != nullptr, ""); 3101 // if (jt->is_Compiler_thread()) { 3102 // CompilerThread* ct = (CompilerThread*)jt; 3103 // 3104 // guarantee(ct != nullptr, ""); 3105 // if (ct->queue() == queue) { 3106 // ResourceMark rm; 3107 // CompileTask* task = ct->task(); 3108 // st->print(" %s: ", ct->name_raw()); 3109 // if (task != nullptr) { 3110 // task->print(st, nullptr, true /*short_form*/, false /*cr*/); 3111 // } 3112 // st->cr(); 3113 // } 3114 // } 3115 // } 3116 } 3117 } 3118 void CompileBroker::print_statistics_on(outputStream* st) { 3119 st->print_cr(" Total: %u methods; %u bailouts, %u invalidated, %u non_entrant", 3120 _total_compile_count, _total_bailout_count, _total_invalidated_count, _total_not_entrant_count); 3121 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) { 3122 print_tier_helper(st, "Tier", tier, &_stats_per_level[tier-1]); 3123 } 3124 st->cr(); 3125 3126 if (LoadCachedCode || StoreCachedCode) { 3127 for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) { 3128 if (tier != CompLevel_full_profile) { 3129 print_tier_helper(st, "SC T", tier, &_scc_stats_per_level[tier - 1]); 3130 } 3131 } 3132 st->cr(); 3133 } 3134 3135 print_queue_info(st, _c1_compile_queue); 3136 print_queue_info(st, _c2_compile_queue); 3137 print_queue_info(st, _c3_compile_queue); 3138 print_queue_info(st, _sc1_compile_queue); 3139 print_queue_info(st, _sc2_compile_queue); 3140 } 3141 3142 void CompileBroker::print_times(bool per_compiler, bool aggregate) { 3143 if (per_compiler) { 3144 if (aggregate) { 3145 tty->cr(); 3146 tty->print_cr("[%dms] Individual compiler times (for compiled methods only)", (int)tty->time_stamp().milliseconds()); 3147 tty->print_cr("------------------------------------------------"); 3148 tty->cr(); 3149 } 3150 for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) { 3151 AbstractCompiler* comp = _compilers[i]; 3152 if (comp != nullptr) { 3153 print_times(comp->name(), comp->stats()); 3154 } 3155 } 3156 if (_scc_stats._standard._count > 0) { 3157 print_times("SC", &_scc_stats); 3158 } 3159 if (aggregate) { 3160 tty->cr(); 3161 tty->print_cr("Individual compilation Tier times (for compiled methods only)"); 
      tty->print_cr("------------------------------------------------");
      tty->cr();
    }
    char tier_name[256];
    for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) {
      CompilerStatistics* stats = &_stats_per_level[tier-1];
      os::snprintf_checked(tier_name, sizeof(tier_name), "Tier%d", tier);
      print_times(tier_name, stats);
    }
    for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) {
      CompilerStatistics* stats = &_scc_stats_per_level[tier-1];
      if (stats->_standard._bytes > 0) {
        os::snprintf_checked(tier_name, sizeof(tier_name), "SC T%d", tier);
        print_times(tier_name, stats);
      }
    }
  }

  if (!aggregate) {
    return;
  }

  elapsedTimer standard_compilation = CompileBroker::_t_standard_compilation;
  elapsedTimer osr_compilation = CompileBroker::_t_osr_compilation;
  elapsedTimer total_compilation = CompileBroker::_t_total_compilation;

  uint standard_bytes_compiled = CompileBroker::_sum_standard_bytes_compiled;
  uint osr_bytes_compiled = CompileBroker::_sum_osr_bytes_compiled;

  uint standard_compile_count = CompileBroker::_total_standard_compile_count;
  uint osr_compile_count = CompileBroker::_total_osr_compile_count;
  uint total_compile_count = CompileBroker::_total_compile_count;
  uint total_bailout_count = CompileBroker::_total_bailout_count;
  uint total_invalidated_count = CompileBroker::_total_invalidated_count;

  uint nmethods_code_size = CompileBroker::_sum_nmethod_code_size;
  uint nmethods_size = CompileBroker::_sum_nmethod_size;

  tty->cr();
  tty->print_cr("Accumulated compiler times");
  tty->print_cr("----------------------------------------------------------");
  //0000000000111111111122222222223333333333444444444455555555556666666666
  //0123456789012345678901234567890123456789012345678901234567890123456789
  tty->print_cr(" Total compilation time : %7.3f s", total_compilation.seconds());
  tty->print_cr(" Standard compilation : %7.3f s, Average : %2.3f s",
                standard_compilation.seconds(),
                standard_compile_count == 0 ? 0.0 : standard_compilation.seconds() / standard_compile_count);
  tty->print_cr(" Bailed out compilation : %7.3f s, Average : %2.3f s",
                CompileBroker::_t_bailedout_compilation.seconds(),
                total_bailout_count == 0 ? 0.0 : CompileBroker::_t_bailedout_compilation.seconds() / total_bailout_count);
  tty->print_cr(" On stack replacement : %7.3f s, Average : %2.3f s",
                osr_compilation.seconds(),
                osr_compile_count == 0 ? 0.0 : osr_compilation.seconds() / osr_compile_count);
  tty->print_cr(" Invalidated : %7.3f s, Average : %2.3f s",
                CompileBroker::_t_invalidated_compilation.seconds(),
                total_invalidated_count == 0 ?
                  0.0 : CompileBroker::_t_invalidated_compilation.seconds() / total_invalidated_count);

  if (StoreCachedCode || LoadCachedCode) { // Check flags because SC cache could be closed already
    tty->cr();
    SCCache::print_timers_on(tty);
  }
  AbstractCompiler *comp = compiler(CompLevel_simple);
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
  comp = compiler(CompLevel_full_optimization);
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
  comp = _compilers[2];
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    JVMCICompiler *jvmci_comp = JVMCICompiler::instance(false, JavaThread::current_or_null());
    if (jvmci_comp != nullptr && jvmci_comp != comp) {
      tty->cr();
      jvmci_comp->print_timers();
    }
  }
#endif

  tty->cr();
  tty->print_cr(" Total compiled methods : %8u methods", total_compile_count);
  tty->print_cr(" Standard compilation : %8u methods", standard_compile_count);
  tty->print_cr(" On stack replacement : %8u methods", osr_compile_count);
  uint tcb = osr_bytes_compiled + standard_bytes_compiled;
  tty->print_cr(" Total compiled bytecodes : %8u bytes", tcb);
  tty->print_cr(" Standard compilation : %8u bytes", standard_bytes_compiled);
  tty->print_cr(" On stack replacement : %8u bytes", osr_bytes_compiled);
  double tcs = total_compilation.seconds();
  uint bps = tcs == 0.0 ? 0 : (uint)(tcb / tcs);
  tty->print_cr(" Average compilation speed : %8u bytes/s", bps);
  tty->cr();
  tty->print_cr(" nmethod code size : %8u bytes", nmethods_code_size);
  tty->print_cr(" nmethod total size : %8u bytes", nmethods_size);
}

// Print general/accumulated JIT information.
void CompileBroker::print_info(outputStream *out) {
  if (out == nullptr) out = tty;
  out->cr();
  out->print_cr("======================");
  out->print_cr(" General JIT info ");
  out->print_cr("======================");
  out->cr();
  out->print_cr(" JIT is : %7s", should_compile_new_jobs() ? "on" : "off");
  out->print_cr(" Compiler threads : %7d", (int)CICompilerCount);
  out->cr();
  out->print_cr("CodeCache overview");
  out->print_cr("--------------------------------------------------------");
  out->cr();
  out->print_cr(" Reserved size : " SIZE_FORMAT_W(7) " KB", CodeCache::max_capacity() / K);
  out->print_cr(" Committed size : " SIZE_FORMAT_W(7) " KB", CodeCache::capacity() / K);
  out->print_cr(" Unallocated capacity : " SIZE_FORMAT_W(7) " KB", CodeCache::unallocated_capacity() / K);
  out->cr();
}

// Note: tty_lock must not be held upon entry to this function.
// Print functions called from herein do "micro-locking" on tty_lock.
// That's a tradeoff which keeps together important blocks of output.
// At the same time, continuous tty_lock hold time is kept in check,
// preventing concurrently printing threads from stalling a long time.
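//
// Usage sketch (illustrative, not normative; the diagnostic-command plumbing
// that actually calls print_heapinfo() lives outside this file): the analysis
// is typically requested via jcmd, e.g.
//
//   jcmd <pid> Compiler.CodeHeap_Analytics all 4096
//
// where the first argument selects the 'function' handled below and the
// second is the aggregation 'granularity' in bytes. The command name and
// argument order shown here are assumptions for illustration only.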
void CompileBroker::print_heapinfo(outputStream* out, const char* function, size_t granularity) {
  TimeStamp ts_total;
  TimeStamp ts_global;
  TimeStamp ts;

  bool allFun = !strcmp(function, "all");
  bool aggregate = !strcmp(function, "aggregate") || !strcmp(function, "analyze") || allFun;
  bool usedSpace = !strcmp(function, "UsedSpace") || allFun;
  bool freeSpace = !strcmp(function, "FreeSpace") || allFun;
  bool methodCount = !strcmp(function, "MethodCount") || allFun;
  bool methodSpace = !strcmp(function, "MethodSpace") || allFun;
  bool methodAge = !strcmp(function, "MethodAge") || allFun;
  bool methodNames = !strcmp(function, "MethodNames") || allFun;
  bool discard = !strcmp(function, "discard") || allFun;

  if (out == nullptr) {
    out = tty;
  }

  if (!(aggregate || usedSpace || freeSpace || methodCount || methodSpace || methodAge || methodNames || discard)) {
    out->print_cr("\n__ CodeHeapStateAnalytics: Function %s is not supported", function);
    out->cr();
    return;
  }

  ts_total.update(); // record starting point

  if (aggregate) {
    print_info(out);
  }

  // We hold the CodeHeapStateAnalytics_lock all the time, from here until we leave this function.
  // That prevents other threads from destroying (making inconsistent) our view on the CodeHeap.
  // When we request individual parts of the analysis via the jcmd interface, it is possible
  // that in between another thread (another jcmd user or the vm running into CodeCache OOM)
  // updated the aggregated data. We will then see a modified, but again consistent, view
  // on the CodeHeap. That's a tolerable tradeoff we have to accept because we can't hold
  // a lock across user interaction.

  // We should definitely acquire this lock before acquiring Compile_lock and CodeCache_lock.
  // CodeHeapStateAnalytics_lock may be held by a concurrent thread for a long time,
  // leading to an unnecessarily long hold time of the other locks we acquired before.
  ts.update(); // record starting point
  MutexLocker mu0(CodeHeapStateAnalytics_lock, Mutex::_safepoint_check_flag);
  out->print_cr("\n__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________\n", ts.seconds());

  // Holding the CodeCache_lock protects from concurrent alterations of the CodeCache.
  // Unfortunately, such protection is not sufficient:
  // When a new nmethod is created via ciEnv::register_method(), the
  // Compile_lock is taken first. After some initializations,
  // nmethod::new_nmethod() takes over, grabbing the CodeCache_lock
  // immediately (after finalizing the oop references). To lock out concurrent
  // modifiers, we have to grab both locks as well in the described sequence.
  //
  // If we serve an "allFun" call, it is beneficial to hold CodeCache_lock and Compile_lock
  // for the entire duration of aggregation and printing. That makes sure we see
  // a consistent picture and do not run into issues caused by concurrent alterations.
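  // Acquisition order, summarized (a sketch of the protocol described above,
  // not an additional constraint introduced here):
  //
  //   CodeHeapStateAnalytics_lock        // taken unconditionally above
  //     -> Compile_lock                  // only if not at a safepoint and not already owned
  //       -> CodeCache_lock              // only if not at a safepoint and not already owned
  //
  // The code below decides whether the two inner locks are held for the whole
  // "all" run (global) or only around the aggregation step (function).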
  bool should_take_Compile_lock = !SafepointSynchronize::is_at_safepoint() &&
                                  !Compile_lock->owned_by_self();
  bool should_take_CodeCache_lock = !SafepointSynchronize::is_at_safepoint() &&
                                    !CodeCache_lock->owned_by_self();
  bool take_global_lock_1 = allFun && should_take_Compile_lock;
  bool take_global_lock_2 = allFun && should_take_CodeCache_lock;
  bool take_function_lock_1 = !allFun && should_take_Compile_lock;
  bool take_function_lock_2 = !allFun && should_take_CodeCache_lock;
  bool take_global_locks = take_global_lock_1 || take_global_lock_2;
  bool take_function_locks = take_function_lock_1 || take_function_lock_2;

  ts_global.update(); // record starting point

  ConditionalMutexLocker mu1(Compile_lock, take_global_lock_1, Mutex::_safepoint_check_flag);
  ConditionalMutexLocker mu2(CodeCache_lock, take_global_lock_2, Mutex::_no_safepoint_check_flag);
  if (take_global_locks) {
    out->print_cr("\n__ Compile & CodeCache (global) lock wait took %10.3f seconds _________\n", ts_global.seconds());
    ts_global.update(); // record starting point
  }

  if (aggregate) {
    ts.update(); // record starting point
    ConditionalMutexLocker mu11(Compile_lock, take_function_lock_1, Mutex::_safepoint_check_flag);
    ConditionalMutexLocker mu22(CodeCache_lock, take_function_lock_2, Mutex::_no_safepoint_check_flag);
    if (take_function_locks) {
      out->print_cr("\n__ Compile & CodeCache (function) lock wait took %10.3f seconds _________\n", ts.seconds());
    }

    ts.update(); // record starting point
    CodeCache::aggregate(out, granularity);
    if (take_function_locks) {
      out->print_cr("\n__ Compile & CodeCache (function) lock hold took %10.3f seconds _________\n", ts.seconds());
    }
  }

  if (usedSpace) CodeCache::print_usedSpace(out);
  if (freeSpace) CodeCache::print_freeSpace(out);
  if (methodCount) CodeCache::print_count(out);
  if (methodSpace) CodeCache::print_space(out);
  if (methodAge) CodeCache::print_age(out);
  if (methodNames) {
    if (allFun) {
      // print_names() can only be used safely if the locks have been continuously held
      // since aggregation begin. That is true only for function "all".
      CodeCache::print_names(out);
    } else {
      out->print_cr("\nCodeHeapStateAnalytics: Function 'MethodNames' is only available as part of function 'all'");
    }
  }
  if (discard) CodeCache::discard(out);

  if (take_global_locks) {
    out->print_cr("\n__ Compile & CodeCache (global) lock hold took %10.3f seconds _________\n", ts_global.seconds());
  }
  out->print_cr("\n__ CodeHeapStateAnalytics total duration %10.3f seconds _________\n", ts_total.seconds());
}
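
// Example (illustrative only, not part of the product code): a direct caller
// wanting the full analysis printed to the default stream could invoke
//
//   CompileBroker::print_heapinfo(nullptr /* defaults to tty */, "all", 4096);
//
// Any other 'function' string handled above ("aggregate", "analyze", "UsedSpace",
// "FreeSpace", "MethodCount", "MethodSpace", "MethodAge", "MethodNames",
// "discard") selects just that part of the analysis.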