1 /*
2 * Copyright (c) 1999, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotLinkedClassBulkLoader.hpp"
26 #include "cds/cdsConfig.hpp"
27 #include "classfile/javaClasses.inline.hpp"
28 #include "classfile/symbolTable.hpp"
29 #include "classfile/vmClasses.hpp"
30 #include "classfile/vmSymbols.hpp"
31 #include "code/aotCodeCache.hpp"
32 #include "code/codeCache.hpp"
33 #include "code/codeHeapState.hpp"
34 #include "code/dependencyContext.hpp"
35 #include "compiler/compilationLog.hpp"
36 #include "compiler/compilationMemoryStatistic.hpp"
37 #include "compiler/compilationPolicy.hpp"
38 #include "compiler/compileBroker.hpp"
39 #include "compiler/compileLog.hpp"
40 #include "compiler/compilerDefinitions.inline.hpp"
41 #include "compiler/compilerEvent.hpp"
42 #include "compiler/compilerOracle.hpp"
43 #include "compiler/directivesParser.hpp"
44 #include "compiler/recompilationPolicy.hpp"
45 #include "gc/shared/memAllocator.hpp"
46 #include "interpreter/linkResolver.hpp"
47 #include "jfr/jfrEvents.hpp"
48 #include "jvm.h"
49 #include "logging/log.hpp"
50 #include "logging/logStream.hpp"
51 #include "memory/allocation.inline.hpp"
52 #include "memory/resourceArea.hpp"
53 #include "memory/universe.hpp"
54 #include "oops/method.inline.hpp"
55 #include "oops/methodData.hpp"
56 #include "oops/oop.inline.hpp"
57 #include "prims/jvmtiExport.hpp"
58 #include "prims/nativeLookup.hpp"
59 #include "prims/whitebox.hpp"
60 #include "runtime/atomicAccess.hpp"
61 #include "runtime/escapeBarrier.hpp"
62 #include "runtime/globals_extension.hpp"
63 #include "runtime/handles.inline.hpp"
64 #include "runtime/init.hpp"
65 #include "runtime/interfaceSupport.inline.hpp"
66 #include "runtime/java.hpp"
67 #include "runtime/javaCalls.hpp"
68 #include "runtime/jniHandles.inline.hpp"
69 #include "runtime/os.hpp"
70 #include "runtime/perfData.hpp"
71 #include "runtime/safepointVerifiers.hpp"
72 #include "runtime/sharedRuntime.hpp"
73 #include "runtime/threads.hpp"
74 #include "runtime/threadSMR.inline.hpp"
75 #include "runtime/timerTrace.hpp"
76 #include "runtime/vframe.inline.hpp"
77 #include "services/management.hpp"
78 #include "utilities/debug.hpp"
79 #include "utilities/dtrace.hpp"
80 #include "utilities/events.hpp"
81 #include "utilities/formatBuffer.hpp"
82 #include "utilities/macros.hpp"
83 #include "utilities/nonblockingQueue.inline.hpp"
84 #ifdef COMPILER1
85 #include "c1/c1_Compiler.hpp"
86 #endif
87 #ifdef COMPILER2
88 #include "opto/c2compiler.hpp"
89 #endif
90 #if INCLUDE_JVMCI
91 #include "jvmci/jvmciEnv.hpp"
92 #include "jvmci/jvmciRuntime.hpp"
93 #endif
94
95 #ifdef DTRACE_ENABLED
96
97 // Only bother with this argument setup if dtrace is available
98
99 #define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name) \
100 { \
101 Symbol* klass_name = (method)->klass_name(); \
102 Symbol* name = (method)->name(); \
103 Symbol* signature = (method)->signature(); \
104 HOTSPOT_METHOD_COMPILE_BEGIN( \
105 (char *) comp_name, strlen(comp_name), \
106 (char *) klass_name->bytes(), klass_name->utf8_length(), \
107 (char *) name->bytes(), name->utf8_length(), \
108 (char *) signature->bytes(), signature->utf8_length()); \
109 }
110
111 #define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success) \
112 { \
113 Symbol* klass_name = (method)->klass_name(); \
114 Symbol* name = (method)->name(); \
115 Symbol* signature = (method)->signature(); \
116 HOTSPOT_METHOD_COMPILE_END( \
117 (char *) comp_name, strlen(comp_name), \
118 (char *) klass_name->bytes(), klass_name->utf8_length(), \
119 (char *) name->bytes(), name->utf8_length(), \
120 (char *) signature->bytes(), signature->utf8_length(), (success)); \
121 }
122
123 #else // ndef DTRACE_ENABLED
124
125 #define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)
126 #define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)
127
128 #endif // ndef DTRACE_ENABLED
129
130 bool CompileBroker::_initialized = false;
131 volatile bool CompileBroker::_should_block = false;
132 volatile int CompileBroker::_print_compilation_warning = 0;
133 volatile jint CompileBroker::_should_compile_new_jobs = run_compilation;
134
135 // The installed compiler(s)
136 AbstractCompiler* CompileBroker::_compilers[2];
137
// The maximum numbers of compiler threads, determined during startup.
139 int CompileBroker::_c1_count = 0;
140 int CompileBroker::_c2_count = 0;
141 int CompileBroker::_ac_count = 0;
142
// Arrays of global handles to the java.lang.Thread objects of the compiler threads
144 jobject* CompileBroker::_compiler1_objects = nullptr;
145 jobject* CompileBroker::_compiler2_objects = nullptr;
146 jobject* CompileBroker::_ac_objects = nullptr;
147
148 CompileLog** CompileBroker::_compiler1_logs = nullptr;
149 CompileLog** CompileBroker::_compiler2_logs = nullptr;
150 CompileLog** CompileBroker::_ac_logs = nullptr;
151
// These counters are used to assign a unique ID to each compilation.
153 volatile jint CompileBroker::_compilation_id = 0;
154 volatile jint CompileBroker::_osr_compilation_id = 0;
155 volatile jint CompileBroker::_native_compilation_id = 0;
156
157 // Performance counters
158 PerfCounter* CompileBroker::_perf_total_compilation = nullptr;
159 PerfCounter* CompileBroker::_perf_osr_compilation = nullptr;
160 PerfCounter* CompileBroker::_perf_standard_compilation = nullptr;
161
162 PerfCounter* CompileBroker::_perf_total_bailout_count = nullptr;
163 PerfCounter* CompileBroker::_perf_total_invalidated_count = nullptr;
164 PerfCounter* CompileBroker::_perf_total_compile_count = nullptr;
165 PerfCounter* CompileBroker::_perf_total_osr_compile_count = nullptr;
166 PerfCounter* CompileBroker::_perf_total_standard_compile_count = nullptr;
167
168 PerfCounter* CompileBroker::_perf_sum_osr_bytes_compiled = nullptr;
169 PerfCounter* CompileBroker::_perf_sum_standard_bytes_compiled = nullptr;
170 PerfCounter* CompileBroker::_perf_sum_nmethod_size = nullptr;
171 PerfCounter* CompileBroker::_perf_sum_nmethod_code_size = nullptr;
172
173 PerfStringVariable* CompileBroker::_perf_last_method = nullptr;
174 PerfStringVariable* CompileBroker::_perf_last_failed_method = nullptr;
175 PerfStringVariable* CompileBroker::_perf_last_invalidated_method = nullptr;
176 PerfVariable* CompileBroker::_perf_last_compile_type = nullptr;
177 PerfVariable* CompileBroker::_perf_last_compile_size = nullptr;
178 PerfVariable* CompileBroker::_perf_last_failed_type = nullptr;
179 PerfVariable* CompileBroker::_perf_last_invalidated_type = nullptr;
180
181 // Timers and counters for generating statistics
182 elapsedTimer CompileBroker::_t_total_compilation;
183 elapsedTimer CompileBroker::_t_osr_compilation;
184 elapsedTimer CompileBroker::_t_standard_compilation;
185 elapsedTimer CompileBroker::_t_invalidated_compilation;
186 elapsedTimer CompileBroker::_t_bailedout_compilation;
187
188 uint CompileBroker::_total_bailout_count = 0;
189 uint CompileBroker::_total_invalidated_count = 0;
190 uint CompileBroker::_total_not_entrant_count = 0;
191 uint CompileBroker::_total_compile_count = 0;
192 uint CompileBroker::_total_osr_compile_count = 0;
193 uint CompileBroker::_total_standard_compile_count = 0;
194 uint CompileBroker::_total_compiler_stopped_count = 0;
195 uint CompileBroker::_total_compiler_restarted_count = 0;
196
197 uint CompileBroker::_sum_osr_bytes_compiled = 0;
198 uint CompileBroker::_sum_standard_bytes_compiled = 0;
199 uint CompileBroker::_sum_nmethod_size = 0;
200 uint CompileBroker::_sum_nmethod_code_size = 0;
201
202 jlong CompileBroker::_peak_compilation_time = 0;
203
204 CompilerStatistics CompileBroker::_stats_per_level[CompLevel_full_optimization];
205 CompilerStatistics CompileBroker::_aot_stats;
206 CompilerStatistics CompileBroker::_aot_stats_per_level[CompLevel_full_optimization + 1];
207
208 CompileQueue* CompileBroker::_c2_compile_queue = nullptr;
209 CompileQueue* CompileBroker::_c1_compile_queue = nullptr;
210 CompileQueue* CompileBroker::_ac1_compile_queue = nullptr;
211 CompileQueue* CompileBroker::_ac2_compile_queue = nullptr;
212
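// Early initialization for the compile broker: set up the compilation event log and the
// compiler directives stack, and parse a directives file if one was supplied.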
213 bool compileBroker_init() {
214 if (LogEvents) {
215 CompilationLog::init();
216 }
217
218 // init directives stack, adding default directive
219 DirectivesStack::init();
220
221 if (DirectivesParser::has_file()) {
222 return DirectivesParser::parse_from_flag();
223 } else if (CompilerDirectivesPrint) {
224 // Print default directive even when no other was added
225 DirectivesStack::print(tty);
226 }
227
228 return true;
229 }
230
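// RAII helper: binds the given task to the current CompilerThread for the duration of a
// compilation, arms the compilation timeout, and writes task start/done records to the
// thread's compile log. The destructor also handles completion notification and task cleanup.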
231 CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
232 CompilerThread* thread = CompilerThread::current();
233 thread->set_task(task);
234 CompileLog* log = thread->log();
235 thread->timeout()->arm();
236 if (log != nullptr && !task->is_unloaded()) task->log_task_start(log);
237 }
238
239 CompileTaskWrapper::~CompileTaskWrapper() {
240 CompilerThread* thread = CompilerThread::current();
241
242 // First, disarm the timeout. This still relies on the underlying task.
243 thread->timeout()->disarm();
244
245 CompileTask* task = thread->task();
246 CompileLog* log = thread->log();
247 AbstractCompiler* comp = thread->compiler();
248 if (log != nullptr && !task->is_unloaded()) task->log_task_done(log);
249 thread->set_task(nullptr);
250 thread->set_env(nullptr);
251 if (task->is_blocking()) {
252 bool free_task = false;
253 {
254 MutexLocker notifier(thread, CompileTaskWait_lock);
255 task->mark_complete();
256 #if INCLUDE_JVMCI
257 if (comp->is_jvmci()) {
258 if (!task->has_waiter()) {
259 // The waiting thread timed out and thus did not delete the task.
260 free_task = true;
261 }
262 task->set_blocking_jvmci_compile_state(nullptr);
263 }
264 #endif
265 if (!free_task) {
266 // Notify the waiting thread that the compilation has completed
267 // so that it can free the task.
268 CompileTaskWait_lock->notify_all();
269 }
270 }
271 if (free_task) {
272 // The task can only be deleted once the task lock is released.
273 delete task;
274 }
275 } else {
276 task->mark_complete();
277
278 // By convention, the compiling thread is responsible for deleting
279 // a non-blocking CompileTask.
280 delete task;
281 }
282 }
283
284 /**
285 * Check if a CompilerThread can be removed and update count if requested.
286 */
287 bool CompileBroker::can_remove(CompilerThread *ct, bool do_it) {
288 assert(UseDynamicNumberOfCompilerThreads, "or shouldn't be here");
289 if (!ReduceNumberOfCompilerThreads) return false;
290
291 if (RecompilationPolicy::have_recompilation_work()) return false;
292
293 AbstractCompiler *compiler = ct->compiler();
294 int compiler_count = compiler->num_compiler_threads();
295 bool c1 = compiler->is_c1();
296
297 // Keep at least 1 compiler thread of each type.
298 if (compiler_count < 2) return false;
299
300 // Keep thread alive for at least some time.
301 if (ct->idle_time_millis() < (c1 ? 500 : 100)) return false;
302
303 #if INCLUDE_JVMCI
304 if (compiler->is_jvmci() && !UseJVMCINativeLibrary) {
305 // Handles for JVMCI thread objects may get released concurrently.
306 if (do_it) {
307 assert(CompileThread_lock->owner() == ct, "must be holding lock");
308 } else {
309 // Skip check if it's the last thread and let caller check again.
310 return true;
311 }
312 }
313 #endif
314
315 // We only allow the last compiler thread of each type to get removed.
316 jobject last_compiler = c1 ? compiler1_object(compiler_count - 1)
317 : compiler2_object(compiler_count - 1);
318 if (ct->threadObj() == JNIHandles::resolve_non_null(last_compiler)) {
319 if (do_it) {
320 assert_locked_or_safepoint(CompileThread_lock); // Update must be consistent.
321 compiler->set_num_compiler_threads(compiler_count - 1);
322 #if INCLUDE_JVMCI
323 if (compiler->is_jvmci() && !UseJVMCINativeLibrary) {
324 // Old j.l.Thread object can die when no longer referenced elsewhere.
325 JNIHandles::destroy_global(compiler2_object(compiler_count - 1));
326 _compiler2_objects[compiler_count - 1] = nullptr;
327 }
328 #endif
329 }
330 return true;
331 }
332 return false;
333 }
334
335 /**
336 * Add a CompileTask to a CompileQueue.
337 */
338 void CompileQueue::add(CompileTask* task) {
339 assert(_lock->owned_by_self(), "must own lock");
340
341 task->set_next(nullptr);
342 task->set_prev(nullptr);
343
344 if (_last == nullptr) {
345 // The compile queue is empty.
346 assert(_first == nullptr, "queue is empty");
347 _first = task;
348 _last = task;
349 } else {
350 // Append the task to the queue.
351 assert(_last->next() == nullptr, "not last");
352 _last->set_next(task);
353 task->set_prev(_last);
354 _last = task;
355 }
356 ++_size;
357 ++_total_added;
358 if (_size > _peak_size) {
359 _peak_size = _size;
360 }
361
362 // Mark the method as being in the compile queue.
363 task->method()->set_queued_for_compilation();
364
365 task->mark_queued(os::elapsed_counter());
366
367 if (CIPrintCompileQueue) {
368 print_tty();
369 }
370
371 if (LogCompilation && xtty != nullptr) {
372 task->log_task_queued();
373 }
374
375 if (TrainingData::need_data() && !CDSConfig::is_dumping_final_static_archive()) {
376 CompileTrainingData* ctd = CompileTrainingData::make(task);
377 if (ctd != nullptr) {
378 task->set_training_data(ctd);
379 }
380 }
381
382 // Notify CompilerThreads that a task is available.
383 _lock->notify_all();
384 }
385
386 void CompileQueue::add_pending(CompileTask* task) {
387 assert(_lock->owned_by_self() == false, "must NOT own lock");
388 assert(UseLockFreeCompileQueues, "");
389 task->method()->set_queued_for_compilation();
390 _queue.push(*task);
391 // FIXME: additional coordination needed? e.g., is it possible for compiler thread to block w/o processing pending tasks?
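  // If the queue looks empty, an idle compiler thread may be waiting on the queue lock;
  // take the lock and notify so it wakes up and drains the pending queue via transfer_pending().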
392 if (is_empty()) {
393 MutexLocker ml(_lock);
394 _lock->notify_all();
395 }
396 }
397
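// Decide what to do with a task taken from the lock-free pending queue: returns true if the
// task is stale (method unloaded, already queued, or already compiled) and should go on the
// stale list, false if it is still active and should be added to the regular queue.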
398 static bool process_pending(CompileTask* task) {
399 // guarantee(task->method()->queued_for_compilation(), "");
400 if (task->is_unloaded()) {
401 return true; // unloaded
402 }
403 task->method()->set_queued_for_compilation(); // FIXME
404 if (task->method()->pending_queue_processed()) {
405 return true; // already queued
406 }
407 // Mark the method as being in the compile queue.
408 task->method()->set_pending_queue_processed();
409 if (CompileBroker::compilation_is_complete(task->method(), task->osr_bci(), task->comp_level(),
410 task->requires_online_compilation(), task->compile_reason())) {
411 return true; // already compiled
412 }
413 return false; // active
414 }
415
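// Drain the lock-free pending queue into the regular (locked) queue, diverting stale tasks
// to the _first_stale list for later purging. Caller must hold the queue lock.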
416 void CompileQueue::transfer_pending() {
417 assert(_lock->owned_by_self(), "must own lock");
418
419 CompileTask* task;
420 while ((task = _queue.pop()) != nullptr) {
421 bool is_stale = process_pending(task);
422 if (is_stale) {
423 task->set_next(_first_stale);
424 task->set_prev(nullptr);
425 _first_stale = task;
426 } else {
427 add(task);
428 }
429 }
430 }
431
432 /**
433 * Empties compilation queue by deleting all compilation tasks.
434 * Furthermore, the method wakes up all threads that are waiting
435 * on a compilation task to finish. This can happen if background
436 * compilation is disabled.
437 */
438 void CompileQueue::delete_all() {
439 MutexLocker mu(_lock);
440 transfer_pending();
441
442 CompileTask* current = _first;
443
444 // Iterate over all tasks in the compile queue
445 while (current != nullptr) {
446 CompileTask* next = current->next();
447 if (!current->is_blocking()) {
448 // Non-blocking task. No one is waiting for it, delete it now.
449 delete current;
450 } else {
      // Blocking task. By convention, it is the waiter's responsibility
452 // to delete the task. We cannot delete it here, because we do not
453 // coordinate with waiters. We will notify the waiters later.
454 }
455 current = next;
456 }
457 _first = nullptr;
458 _last = nullptr;
459
460 // Wake up all blocking task waiters to deal with remaining blocking
461 // tasks. This is not a performance sensitive path, so we do this
462 // unconditionally to simplify coding/testing.
463 {
464 MonitorLocker ml(Thread::current(), CompileTaskWait_lock);
465 ml.notify_all();
466 }
467
468 // Wake up all threads that block on the queue.
469 _lock->notify_all();
470 }
471
472 /**
473 * Get the next CompileTask from a CompileQueue
474 */
475 CompileTask* CompileQueue::get(CompilerThread* thread) {
  // Keep the selected method alive across any safepoint that may occur while the
  // compile queue lock below is released, so RedefineClasses cannot reclaim it.
478 methodHandle save_method;
479
480 MonitorLocker locker(_lock);
481 transfer_pending();
482
483 RecompilationPolicy::sample_load_average();
484
485 // If _first is null we have no more compile jobs. There are two reasons for
486 // having no compile jobs: First, we compiled everything we wanted. Second,
487 // we ran out of code cache so compilation has been disabled. In the latter
488 // case we perform code cache sweeps to free memory such that we can re-enable
489 // compilation.
490 while (_first == nullptr) {
491 // Exit loop if compilation is disabled forever
492 if (CompileBroker::is_compilation_disabled_forever()) {
493 return nullptr;
494 }
495
496 AbstractCompiler* compiler = thread->compiler();
497 guarantee(compiler != nullptr, "Compiler object must exist");
498 compiler->on_empty_queue(this, thread);
499 if (_first != nullptr) {
500 // The call to on_empty_queue may have temporarily unlocked the MCQ lock
501 // so check again whether any tasks were added to the queue.
502 break;
503 }
504
    // If we have added stale tasks, there might be waiters that need to be
    // notified that these tasks have failed. Normally, this would
507 // be done by a compiler thread that would perform the purge at
508 // the end of some compilation. But, if compile queue is empty,
509 // there is no guarantee compilers would run and do the purge.
510 // Do the purge here and now to unblock the waiters.
511 // Perform this until we run out of stale tasks.
512 while (_first_stale != nullptr) {
513 purge_stale_tasks();
514 }
515 if (_first != nullptr) {
516 // Purge stale tasks may have transferred some new tasks,
517 // so check again.
518 break;
519 }
520
521 // If there are no compilation tasks and we can compile new jobs
522 // (i.e., there is enough free space in the code cache) there is
523 // no need to invoke the GC.
524 // We need a timed wait here, since compiler threads can exit if compilation
525 // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
526 // is not critical and we do not want idle compiler threads to wake up too often.
527 locker.wait(5*1000);
528
529 transfer_pending(); // reacquired lock
530
531 if (RecompilationPolicy::have_recompilation_work()) return nullptr;
532
533 if (UseDynamicNumberOfCompilerThreads && _first == nullptr) {
534 // Still nothing to compile. Give caller a chance to stop this thread.
535 if (CompileBroker::can_remove(CompilerThread::current(), false)) return nullptr;
536 }
537 }
538
539 if (CompileBroker::is_compilation_disabled_forever()) {
540 return nullptr;
541 }
542
543 CompileTask* task;
544 {
545 NoSafepointVerifier nsv;
546 task = CompilationPolicy::select_task(this, thread);
547 if (task != nullptr) {
548 task = task->select_for_compilation();
549 }
550 }
551
552 if (task != nullptr) {
553 // Save method pointers across unlock safepoint. The task is removed from
554 // the compilation queue, which is walked during RedefineClasses.
555 Thread* thread = Thread::current();
556 save_method = methodHandle(thread, task->method());
557
558 remove(task);
559 }
560 purge_stale_tasks(); // may temporarily release MCQ lock
561 return task;
562 }
563
564 // Clean & deallocate stale compile tasks.
565 // Temporarily releases MethodCompileQueue lock.
566 void CompileQueue::purge_stale_tasks() {
567 assert(_lock->owned_by_self(), "must own lock");
568 if (_first_stale != nullptr) {
569 // Stale tasks are purged when MCQ lock is released,
570 // but _first_stale updates are protected by MCQ lock.
571 // Once task processing starts and MCQ lock is released,
572 // other compiler threads can reuse _first_stale.
573 CompileTask* head = _first_stale;
574 _first_stale = nullptr;
575 {
576 MutexUnlocker ul(_lock);
577 for (CompileTask* task = head; task != nullptr; ) {
578 CompileTask* next_task = task->next();
579 task->set_next(nullptr);
580 CompileTaskWrapper ctw(task); // Frees the task
581 task->set_failure_reason("stale task");
582 task = next_task;
583 }
584 }
585 transfer_pending(); // transfer pending after reacquiring MCQ lock
586 }
587 }
588
589 void CompileQueue::remove(CompileTask* task) {
590 assert(_lock->owned_by_self(), "must own lock");
591 if (task->prev() != nullptr) {
592 task->prev()->set_next(task->next());
593 } else {
    // task is the first element
595 assert(task == _first, "Sanity");
596 _first = task->next();
597 }
598
599 if (task->next() != nullptr) {
600 task->next()->set_prev(task->prev());
601 } else {
    // task is the last element
603 assert(task == _last, "Sanity");
604 _last = task->prev();
605 }
606 task->set_next(nullptr);
607 task->set_prev(nullptr);
608 --_size;
609 ++_total_removed;
610 }
611
612 void CompileQueue::remove_and_mark_stale(CompileTask* task) {
613 assert(_lock->owned_by_self(), "must own lock");
614 remove(task);
615
616 // Enqueue the task for reclamation (should be done outside MCQ lock)
617 task->set_next(_first_stale);
618 task->set_prev(nullptr);
619 _first_stale = task;
620 }
621
// Methods in the compile queue need to be marked as on-stack
// so that they don't get reclaimed by RedefineClasses.
624 void CompileQueue::mark_on_stack() {
625 for (CompileTask* task = _first; task != nullptr; task = task->next()) {
626 task->mark_on_stack();
627 }
628 for (CompileTask* task = _queue.first(); !_queue.is_end(task); task = task->next()) {
629 assert(task != nullptr, "");
630 task->mark_on_stack();
631 }
632 }
633
634
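// Select the queue for the given compilation level: the AOT code caching queue when requested
// and AOT compiler threads are configured (_ac_count > 0), otherwise the regular C1/C2 queue.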
635 CompileQueue* CompileBroker::compile_queue(int comp_level, bool is_aot) {
636 if (is_c2_compile(comp_level)) return ((is_aot && (_ac_count > 0)) ? _ac2_compile_queue : _c2_compile_queue);
637 if (is_c1_compile(comp_level)) return ((is_aot && (_ac_count > 0)) ? _ac1_compile_queue : _c1_compile_queue);
638 return nullptr;
639 }
640
641 CompileQueue* CompileBroker::c1_compile_queue() {
642 return _c1_compile_queue;
643 }
644
645 CompileQueue* CompileBroker::c2_compile_queue() {
646 return _c2_compile_queue;
647 }
648
649 void CompileBroker::print_compile_queues(outputStream* st) {
650 st->print_cr("Current compiles: ");
651
652 char buf[2000];
653 int buflen = sizeof(buf);
654 Threads::print_threads_compiling(st, buf, buflen, /* short_form = */ true);
655
656 st->cr();
657 if (_c1_compile_queue != nullptr) {
658 _c1_compile_queue->print(st);
659 }
660 if (_c2_compile_queue != nullptr) {
661 _c2_compile_queue->print(st);
662 }
663 if (_ac1_compile_queue != nullptr) {
664 _ac1_compile_queue->print(st);
665 }
666 if (_ac2_compile_queue != nullptr) {
667 _ac2_compile_queue->print(st);
668 }
669 }
670
671 void CompileQueue::print(outputStream* st) {
672 assert_locked_or_safepoint(_lock);
673 st->print_cr("%s:", name());
674 CompileTask* task = _first;
675 if (task == nullptr) {
676 st->print_cr("Empty");
677 } else {
678 while (task != nullptr) {
679 task->print(st, nullptr, true, true);
680 task = task->next();
681 }
682 }
683 st->cr();
684 }
685
686 void CompileQueue::print_tty() {
687 stringStream ss;
688 // Dump the compile queue into a buffer before locking the tty
689 print(&ss);
690 {
691 ttyLocker ttyl;
692 tty->print("%s", ss.freeze());
693 }
694 }
695
696 CompilerCounters::CompilerCounters() {
697 _current_method[0] = '\0';
698 _compile_type = CompileBroker::no_compile;
699 }
700
701 #if INCLUDE_JFR && COMPILER2_OR_JVMCI
// Appends new compiler phase names to the growable array phase_names (a new CompilerPhaseType
// mapping in compiler/compilerEvent.cpp) and registers it with its serializer.
//
// C2 uses an explicit CompilerPhaseType idToPhase mapping in opto/phasetype.hpp,
// so if C2 is used, it must always be registered first.
// This function is called during VM initialization.
708 static void register_jfr_phasetype_serializer(CompilerType compiler_type) {
709 ResourceMark rm;
710 static bool first_registration = true;
711 if (compiler_type == compiler_jvmci) {
712 CompilerEvent::PhaseEvent::get_phase_id("NOT_A_PHASE_NAME", false, false, false);
713 first_registration = false;
714 #ifdef COMPILER2
715 } else if (compiler_type == compiler_c2) {
716 assert(first_registration, "invariant"); // c2 must be registered first.
717 for (int i = 0; i < PHASE_NUM_TYPES; i++) {
718 const char* phase_name = CompilerPhaseTypeHelper::to_description((CompilerPhaseType) i);
719 CompilerEvent::PhaseEvent::get_phase_id(phase_name, false, false, false);
720 }
721 first_registration = false;
722 #endif // COMPILER2
723 }
724 }
725 #endif // INCLUDE_JFR && COMPILER2_OR_JVMCI
726
// ------------------------------------------------------------------
// CompileBroker::compilation_init
//
// Initialize the compilation system: compiler instances, compiler threads,
// and the performance counters used by the compilation MXBean.
731 void CompileBroker::compilation_init(JavaThread* THREAD) {
732 // No need to initialize compilation system if we do not use it.
733 if (!UseCompiler) {
734 return;
735 }
736 // Set the interface to the current compiler(s).
737 _c1_count = CompilationPolicy::c1_count();
738 _c2_count = CompilationPolicy::c2_count();
739 _ac_count = CompilationPolicy::ac_count();
740
741 #if INCLUDE_JVMCI
742 if (EnableJVMCI) {
743 // This is creating a JVMCICompiler singleton.
744 JVMCICompiler* jvmci = new JVMCICompiler();
745
746 if (UseJVMCICompiler) {
747 _compilers[1] = jvmci;
748 if (FLAG_IS_DEFAULT(JVMCIThreads)) {
749 if (BootstrapJVMCI) {
750 // JVMCI will bootstrap so give it more threads
751 _c2_count = MIN2(32, os::active_processor_count());
752 }
753 } else {
754 _c2_count = JVMCIThreads;
755 }
      if (!FLAG_IS_DEFAULT(JVMCIHostThreads)) {
#ifdef COMPILER1
        _c1_count = JVMCIHostThreads;
#endif // COMPILER1
      }
762 }
763 }
764 #endif // INCLUDE_JVMCI
765
766 #ifdef COMPILER1
767 if (_c1_count > 0) {
768 _compilers[0] = new Compiler();
769 }
770 #endif // COMPILER1
771
772 #ifdef COMPILER2
773 if (true JVMCI_ONLY( && !UseJVMCICompiler)) {
774 if (_c2_count > 0) {
775 _compilers[1] = new C2Compiler();
776 // Register c2 first as c2 CompilerPhaseType idToPhase mapping is explicit.
777 // idToPhase mapping for c2 is in opto/phasetype.hpp
778 JFR_ONLY(register_jfr_phasetype_serializer(compiler_c2);)
779 }
780 }
781 #endif // COMPILER2
782
783 #if INCLUDE_JVMCI
784 // Register after c2 registration.
785 // JVMCI CompilerPhaseType idToPhase mapping is dynamic.
786 if (EnableJVMCI) {
787 JFR_ONLY(register_jfr_phasetype_serializer(compiler_jvmci);)
788 }
789 #endif // INCLUDE_JVMCI
790
791 if (CompilerOracle::should_collect_memstat()) {
792 CompilationMemoryStatistic::initialize();
793 }
794
795 // Start the compiler thread(s)
796 init_compiler_threads();
797 // totalTime performance counter is always created as it is required
798 // by the implementation of java.lang.management.CompilationMXBean.
799 {
800 // Ensure OOM leads to vm_exit_during_initialization.
801 EXCEPTION_MARK;
802 _perf_total_compilation =
803 PerfDataManager::create_counter(JAVA_CI, "totalTime",
804 PerfData::U_Ticks, CHECK);
805 }
806
807 if (UsePerfData) {
808
809 EXCEPTION_MARK;
810
811 // create the jvmstat performance counters
812 _perf_osr_compilation =
813 PerfDataManager::create_counter(SUN_CI, "osrTime",
814 PerfData::U_Ticks, CHECK);
815
816 _perf_standard_compilation =
817 PerfDataManager::create_counter(SUN_CI, "standardTime",
818 PerfData::U_Ticks, CHECK);
819
820 _perf_total_bailout_count =
821 PerfDataManager::create_counter(SUN_CI, "totalBailouts",
822 PerfData::U_Events, CHECK);
823
824 _perf_total_invalidated_count =
825 PerfDataManager::create_counter(SUN_CI, "totalInvalidates",
826 PerfData::U_Events, CHECK);
827
828 _perf_total_compile_count =
829 PerfDataManager::create_counter(SUN_CI, "totalCompiles",
830 PerfData::U_Events, CHECK);
831 _perf_total_osr_compile_count =
832 PerfDataManager::create_counter(SUN_CI, "osrCompiles",
833 PerfData::U_Events, CHECK);
834
835 _perf_total_standard_compile_count =
836 PerfDataManager::create_counter(SUN_CI, "standardCompiles",
837 PerfData::U_Events, CHECK);
838
839 _perf_sum_osr_bytes_compiled =
840 PerfDataManager::create_counter(SUN_CI, "osrBytes",
841 PerfData::U_Bytes, CHECK);
842
843 _perf_sum_standard_bytes_compiled =
844 PerfDataManager::create_counter(SUN_CI, "standardBytes",
845 PerfData::U_Bytes, CHECK);
846
847 _perf_sum_nmethod_size =
848 PerfDataManager::create_counter(SUN_CI, "nmethodSize",
849 PerfData::U_Bytes, CHECK);
850
851 _perf_sum_nmethod_code_size =
852 PerfDataManager::create_counter(SUN_CI, "nmethodCodeSize",
853 PerfData::U_Bytes, CHECK);
854
855 _perf_last_method =
856 PerfDataManager::create_string_variable(SUN_CI, "lastMethod",
857 CompilerCounters::cmname_buffer_length,
858 "", CHECK);
859
860 _perf_last_failed_method =
861 PerfDataManager::create_string_variable(SUN_CI, "lastFailedMethod",
862 CompilerCounters::cmname_buffer_length,
863 "", CHECK);
864
865 _perf_last_invalidated_method =
866 PerfDataManager::create_string_variable(SUN_CI, "lastInvalidatedMethod",
867 CompilerCounters::cmname_buffer_length,
868 "", CHECK);
869
870 _perf_last_compile_type =
871 PerfDataManager::create_variable(SUN_CI, "lastType",
872 PerfData::U_None,
873 (jlong)CompileBroker::no_compile,
874 CHECK);
875
876 _perf_last_compile_size =
877 PerfDataManager::create_variable(SUN_CI, "lastSize",
878 PerfData::U_Bytes,
879 (jlong)CompileBroker::no_compile,
880 CHECK);
881
882
883 _perf_last_failed_type =
884 PerfDataManager::create_variable(SUN_CI, "lastFailedType",
885 PerfData::U_None,
886 (jlong)CompileBroker::no_compile,
887 CHECK);
888
889 _perf_last_invalidated_type =
890 PerfDataManager::create_variable(SUN_CI, "lastInvalidatedType",
891 PerfData::U_None,
892 (jlong)CompileBroker::no_compile,
893 CHECK);
894 }
895
896 log_info(aot, codecache, init)("CompileBroker is initialized");
897 _initialized = true;
898 }
899
900 Handle CompileBroker::create_thread_oop(const char* name, TRAPS) {
901 Handle thread_oop = JavaThread::create_system_thread_object(name, CHECK_NH);
902 return thread_oop;
903 }
904
905 void TrainingReplayThread::training_replay_thread_entry(JavaThread* thread, TRAPS) {
906 CompilationPolicy::replay_training_at_init_loop(thread);
907 }
908
909 #if defined(ASSERT) && COMPILER2_OR_JVMCI
910 // Entry for DeoptimizeObjectsALotThread. The threads are started in
911 // CompileBroker::init_compiler_threads() iff DeoptimizeObjectsALot is enabled
912 void DeoptimizeObjectsALotThread::deopt_objs_alot_thread_entry(JavaThread* thread, TRAPS) {
913 DeoptimizeObjectsALotThread* dt = ((DeoptimizeObjectsALotThread*) thread);
914 bool enter_single_loop;
915 {
916 MonitorLocker ml(dt, EscapeBarrier_lock, Mutex::_no_safepoint_check_flag);
917 static int single_thread_count = 0;
918 enter_single_loop = single_thread_count++ < DeoptimizeObjectsALotThreadCountSingle;
919 }
920 if (enter_single_loop) {
921 dt->deoptimize_objects_alot_loop_single();
922 } else {
923 dt->deoptimize_objects_alot_loop_all();
924 }
925 }
926
927 // Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each
928 // barrier targets a single thread which is selected round robin.
929 void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_single() {
930 HandleMark hm(this);
931 while (true) {
932 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *deoptee_thread = jtiwh.next(); ) {
933 { // Begin new scope for escape barrier
934 HandleMarkCleaner hmc(this);
935 ResourceMark rm(this);
936 EscapeBarrier eb(true, this, deoptee_thread);
937 eb.deoptimize_objects(100);
938 }
      // Now sleep after the escape barrier's destructor has resumed deoptee_thread.
940 sleep(DeoptimizeObjectsALotInterval);
941 }
942 }
943 }
944
945 // Execute EscapeBarriers in an endless loop to revert optimizations based on escape analysis. Each
// barrier targets all Java threads in the VM at once.
947 void DeoptimizeObjectsALotThread::deoptimize_objects_alot_loop_all() {
948 HandleMark hm(this);
949 while (true) {
950 { // Begin new scope for escape barrier
951 HandleMarkCleaner hmc(this);
952 ResourceMark rm(this);
953 EscapeBarrier eb(true, this);
954 eb.deoptimize_objects_all_threads();
955 }
    // Now sleep after the escape barrier's destructor has resumed the Java threads.
957 sleep(DeoptimizeObjectsALotInterval);
958 }
959 }
960 #endif // defined(ASSERT) && COMPILER2_OR_JVMCI
961
962
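// Create and start one VM-internal thread of the requested type (compiler, object-deoptimizer
// stress thread, or training replay thread). For compiler threads the queue and compiler are
// attached. Returns nullptr if the thread could not be created; this is tolerated only for
// dynamically added compiler threads, otherwise the VM exits during initialization.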
963 JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD) {
964 Handle thread_oop(THREAD, JNIHandles::resolve_non_null(thread_handle));
965
966 if (java_lang_Thread::thread(thread_oop()) != nullptr) {
967 assert(type == compiler_t, "should only happen with reused compiler threads");
968 // The compiler thread hasn't actually exited yet so don't try to reuse it
969 return nullptr;
970 }
971
972 JavaThread* new_thread = nullptr;
973 switch (type) {
974 case compiler_t:
975 assert(comp != nullptr, "Compiler instance missing.");
976 if (!InjectCompilerCreationFailure || comp->num_compiler_threads() == 0) {
977 CompilerCounters* counters = new CompilerCounters();
978 new_thread = new CompilerThread(queue, counters);
979 }
980 break;
981 #if defined(ASSERT) && COMPILER2_OR_JVMCI
982 case deoptimizer_t:
983 new_thread = new DeoptimizeObjectsALotThread();
984 break;
#endif // defined(ASSERT) && COMPILER2_OR_JVMCI
986 case training_replay_t:
987 new_thread = new TrainingReplayThread();
988 break;
989 default:
990 ShouldNotReachHere();
991 }
992
993 // At this point the new CompilerThread data-races with this startup
994 // thread (which is the main thread and NOT the VM thread).
995 // This means Java bytecodes being executed at startup can
996 // queue compile jobs which will run at whatever default priority the
997 // newly created CompilerThread runs at.
998
999
1000 // At this point it may be possible that no osthread was created for the
1001 // JavaThread due to lack of resources. We will handle that failure below.
1002 // Also check new_thread so that static analysis is happy.
1003 if (new_thread != nullptr && new_thread->osthread() != nullptr) {
1004
1005 if (type == compiler_t) {
1006 CompilerThread::cast(new_thread)->set_compiler(comp);
1007 }
1008
1009 // Note that we cannot call os::set_priority because it expects Java
1010 // priorities and we are *explicitly* using OS priorities so that it's
1011 // possible to set the compiler thread priority higher than any Java
1012 // thread.
1013
1014 int native_prio = CompilerThreadPriority;
1015 if (native_prio == -1) {
1016 if (UseCriticalCompilerThreadPriority) {
1017 native_prio = os::java_to_os_priority[CriticalPriority];
1018 } else {
1019 native_prio = os::java_to_os_priority[NearMaxPriority];
1020 }
1021 }
1022 os::set_native_priority(new_thread, native_prio);
1023
1024 // Note that this only sets the JavaThread _priority field, which by
1025 // definition is limited to Java priorities and not OS priorities.
1026 JavaThread::start_internal_daemon(THREAD, new_thread, thread_oop, NearMaxPriority);
1027
1028 } else { // osthread initialization failure
1029 if (UseDynamicNumberOfCompilerThreads && type == compiler_t
1030 && comp->num_compiler_threads() > 0) {
1031 // The new thread is not known to Thread-SMR yet so we can just delete.
1032 delete new_thread;
1033 return nullptr;
1034 } else {
1035 vm_exit_during_initialization("java.lang.OutOfMemoryError",
1036 os::native_thread_creation_failed_msg());
1037 }
1038 }
1039
1040 os::naked_yield(); // make sure that the compiler thread is started early (especially helpful on SOLARIS)
1041
1042 return new_thread;
1043 }
1044
1045 static bool trace_compiler_threads() {
1046 LogTarget(Debug, jit, thread) lt;
1047 return TraceCompilerThreads || lt.is_enabled();
1048 }
1049
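// Create the java.lang.Thread object for compiler thread i and return a global JNI handle to it.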
1050 static jobject create_compiler_thread(AbstractCompiler* compiler, int i, TRAPS) {
1051 char name_buffer[256];
1052 os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", compiler->name(), i);
1053 Handle thread_oop = JavaThread::create_system_thread_object(name_buffer, CHECK_NULL);
1054 return JNIHandles::make_global(thread_oop);
1055 }
1056
1057 static void print_compiler_threads(stringStream& msg) {
1058 if (TraceCompilerThreads) {
1059 tty->print_cr("%7d %s", (int)tty->time_stamp().milliseconds(), msg.as_string());
1060 }
1061 LogTarget(Debug, jit, thread) lt;
1062 if (lt.is_enabled()) {
1063 LogStream ls(lt);
1064 ls.print_cr("%s", msg.as_string());
1065 }
1066 }
1067
1068 static void print_compiler_thread(JavaThread *ct) {
1069 if (trace_compiler_threads()) {
1070 ResourceMark rm;
1071 ThreadsListHandle tlh; // name() depends on the TLH.
1072 assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
1073 stringStream msg;
1074 msg.print("Added initial compiler thread %s", ct->name());
1075 print_compiler_threads(msg);
1076 }
1077 }
1078
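// Create the compile queues and the Thread objects for all configured compiler threads.
// With UseDynamicNumberOfCompilerThreads, only the first thread per compiler is started here;
// additional threads are started on demand by possibly_add_compiler_threads().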
1079 void CompileBroker::init_compiler_threads() {
1080 // Ensure any exceptions lead to vm_exit_during_initialization.
1081 EXCEPTION_MARK;
1082 #if !defined(ZERO)
1083 assert(_c2_count > 0 || _c1_count > 0, "No compilers?");
1084 #endif // !ZERO
1085 // Initialize the compilation queue
1086 if (_c2_count > 0) {
1087 const char* name = JVMCI_ONLY(UseJVMCICompiler ? "JVMCI compile queue" :) "C2 compile queue";
1088 _c2_compile_queue = new CompileQueue(name, MethodCompileQueueC2_lock);
1089 _compiler2_objects = NEW_C_HEAP_ARRAY(jobject, _c2_count, mtCompiler);
1090 _compiler2_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c2_count, mtCompiler);
1091 }
1092 if (_c1_count > 0) {
1093 _c1_compile_queue = new CompileQueue("C1 compile queue", MethodCompileQueueC1_lock);
1094 _compiler1_objects = NEW_C_HEAP_ARRAY(jobject, _c1_count, mtCompiler);
1095 _compiler1_logs = NEW_C_HEAP_ARRAY(CompileLog*, _c1_count, mtCompiler);
1096 }
1097
1098 if (_ac_count > 0) {
1099 if (_c1_count > 0) { // C1 is present
1100 _ac1_compile_queue = new CompileQueue("C1 AOT code compile queue", MethodCompileQueueSC1_lock);
1101 }
1102 if (_c2_count > 0) { // C2 is present
1103 _ac2_compile_queue = new CompileQueue("C2 AOT code compile queue", MethodCompileQueueSC2_lock);
1104 }
1105 _ac_objects = NEW_C_HEAP_ARRAY(jobject, _ac_count, mtCompiler);
1106 _ac_logs = NEW_C_HEAP_ARRAY(CompileLog*, _ac_count, mtCompiler);
1107 }
1108 char name_buffer[256];
1109
1110 for (int i = 0; i < _c2_count; i++) {
1111 // Create a name for our thread.
1112 jobject thread_handle = create_compiler_thread(_compilers[1], i, CHECK);
1113 _compiler2_objects[i] = thread_handle;
1114 _compiler2_logs[i] = nullptr;
1115
1116 if (!UseDynamicNumberOfCompilerThreads || i == 0) {
1117 JavaThread *ct = make_thread(compiler_t, thread_handle, _c2_compile_queue, _compilers[1], THREAD);
1118 assert(ct != nullptr, "should have been handled for initial thread");
1119 _compilers[1]->set_num_compiler_threads(i + 1);
1120 print_compiler_thread(ct);
1121 }
1122 }
1123
1124 for (int i = 0; i < _c1_count; i++) {
1125 // Create a name for our thread.
1126 jobject thread_handle = create_compiler_thread(_compilers[0], i, CHECK);
1127 _compiler1_objects[i] = thread_handle;
1128 _compiler1_logs[i] = nullptr;
1129
1130 if (!UseDynamicNumberOfCompilerThreads || i == 0) {
1131 JavaThread *ct = make_thread(compiler_t, thread_handle, _c1_compile_queue, _compilers[0], THREAD);
1132 assert(ct != nullptr, "should have been handled for initial thread");
1133 _compilers[0]->set_num_compiler_threads(i + 1);
1134 print_compiler_thread(ct);
1135 }
1136 }
1137
1138 if (_ac_count > 0) {
1139 int i = 0;
1140 if (_c1_count > 0) { // C1 is present
1141 os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d AOT code caching CompilerThread", 1);
1142 Handle thread_oop = create_thread_oop(name_buffer, CHECK);
1143 jobject thread_handle = JNIHandles::make_global(thread_oop);
1144 _ac_objects[i] = thread_handle;
1145 _ac_logs[i] = nullptr;
1146 i++;
1147
1148 JavaThread *ct = make_thread(compiler_t, thread_handle, _ac1_compile_queue, _compilers[0], THREAD);
1149 assert(ct != nullptr, "should have been handled for initial thread");
1150 print_compiler_thread(ct);
1151 }
1152 if (_c2_count > 0) { // C2 is present
1153 os::snprintf_checked(name_buffer, sizeof(name_buffer), "C%d AOT code caching CompilerThread", 2);
1154 Handle thread_oop = create_thread_oop(name_buffer, CHECK);
1155 jobject thread_handle = JNIHandles::make_global(thread_oop);
1156 _ac_objects[i] = thread_handle;
1157 _ac_logs[i] = nullptr;
1158
1159 JavaThread *ct = make_thread(compiler_t, thread_handle, _ac2_compile_queue, _compilers[1], THREAD);
1160 assert(ct != nullptr, "should have been handled for initial thread");
1161 print_compiler_thread(ct);
1162 }
1163 }
1164
1165 if (UsePerfData) {
1166 PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, _c1_count + _c2_count, CHECK);
1167 }
1168
1169 #if defined(ASSERT) && COMPILER2_OR_JVMCI
1170 if (DeoptimizeObjectsALot) {
1171 // Initialize and start the object deoptimizer threads
1172 const int total_count = DeoptimizeObjectsALotThreadCountSingle + DeoptimizeObjectsALotThreadCountAll;
1173 for (int count = 0; count < total_count; count++) {
1174 Handle thread_oop = JavaThread::create_system_thread_object("Deoptimize objects a lot single mode", CHECK);
1175 jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
1176 make_thread(deoptimizer_t, thread_handle, nullptr, nullptr, THREAD);
1177 }
1178 }
1179 #endif // defined(ASSERT) && COMPILER2_OR_JVMCI
1180 }
1181
1182 void CompileBroker::init_training_replay() {
1183 // Ensure any exceptions lead to vm_exit_during_initialization.
1184 EXCEPTION_MARK;
1185 if (TrainingData::have_data()) {
1186 Handle thread_oop = create_thread_oop("Training replay thread", CHECK);
1187 jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
1188 make_thread(training_replay_t, thread_handle, nullptr, nullptr, THREAD);
1189 }
1190 }
1191
1192 void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) {
1193
1194 int old_c2_count = 0, new_c2_count = 0, old_c1_count = 0, new_c1_count = 0;
1195 const int c2_tasks_per_thread = 2, c1_tasks_per_thread = 4;
1196
1197 // Quick check if we already have enough compiler threads without taking the lock.
1198 // Numbers may change concurrently, so we read them again after we have the lock.
1199 if (_c2_compile_queue != nullptr) {
1200 old_c2_count = get_c2_thread_count();
1201 new_c2_count = MIN2(_c2_count, _c2_compile_queue->size() / c2_tasks_per_thread);
1202 }
1203 if (_c1_compile_queue != nullptr) {
1204 old_c1_count = get_c1_thread_count();
1205 new_c1_count = MIN2(_c1_count, _c1_compile_queue->size() / c1_tasks_per_thread);
1206 }
1207 if (new_c2_count <= old_c2_count && new_c1_count <= old_c1_count) return;
1208
1209 // Now, we do the more expensive operations.
1210 physical_memory_size_type free_memory = 0;
1211 // Return value ignored - defaulting to 0 on failure.
1212 (void)os::free_memory(free_memory);
1213 // If SegmentedCodeCache is off, both values refer to the single heap (with type CodeBlobType::All).
1214 size_t available_cc_np = CodeCache::unallocated_capacity(CodeBlobType::MethodNonProfiled),
1215 available_cc_p = CodeCache::unallocated_capacity(CodeBlobType::MethodProfiled);
1216
1217 // Only attempt to start additional threads if the lock is free.
1218 if (!CompileThread_lock->try_lock()) return;
1219
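  // Sizing heuristic as encoded below: allow roughly one C2 thread per two queued tasks,
  // per ~200 MB of free memory, and per 128 KB of unallocated non-profiled code cache
  // (C1 uses four tasks, ~100 MB, and the profiled code cache heap instead).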
1220 if (_c2_compile_queue != nullptr) {
1221 old_c2_count = get_c2_thread_count();
1222 new_c2_count = MIN4(_c2_count,
1223 _c2_compile_queue->size() / c2_tasks_per_thread,
1224 (int)(free_memory / (200*M)),
1225 (int)(available_cc_np / (128*K)));
1226
1227 for (int i = old_c2_count; i < new_c2_count; i++) {
1228 #if INCLUDE_JVMCI
1229 if (UseJVMCICompiler && !UseJVMCINativeLibrary && _compiler2_objects[i] == nullptr) {
1230 // Native compiler threads as used in C1/C2 can reuse the j.l.Thread objects as their
1231 // existence is completely hidden from the rest of the VM (and those compiler threads can't
1232 // call Java code to do the creation anyway).
1233 //
1234 // For pure Java JVMCI we have to create new j.l.Thread objects as they are visible and we
1235 // can see unexpected thread lifecycle transitions if we bind them to new JavaThreads. For
1236 // native library JVMCI it's preferred to use the C1/C2 strategy as this avoids unnecessary
1237 // coupling with Java.
1238 if (!THREAD->can_call_java()) break;
1239 char name_buffer[256];
1240 os::snprintf_checked(name_buffer, sizeof(name_buffer), "%s CompilerThread%d", _compilers[1]->name(), i);
1241 Handle thread_oop;
1242 {
1243 // We have to give up the lock temporarily for the Java calls.
1244 MutexUnlocker mu(CompileThread_lock);
1245 thread_oop = JavaThread::create_system_thread_object(name_buffer, THREAD);
1246 }
1247 if (HAS_PENDING_EXCEPTION) {
1248 if (trace_compiler_threads()) {
1249 ResourceMark rm;
1250 stringStream msg;
1251 msg.print_cr("JVMCI compiler thread creation failed:");
1252 PENDING_EXCEPTION->print_on(&msg);
1253 print_compiler_threads(msg);
1254 }
1255 CLEAR_PENDING_EXCEPTION;
1256 break;
1257 }
1258 // Check if another thread has beaten us during the Java calls.
1259 if (get_c2_thread_count() != i) break;
1260 jobject thread_handle = JNIHandles::make_global(thread_oop);
1261 assert(compiler2_object(i) == nullptr, "Old one must be released!");
1262 _compiler2_objects[i] = thread_handle;
1263 }
1264 #endif
1265 guarantee(compiler2_object(i) != nullptr, "Thread oop must exist");
1266 JavaThread *ct = make_thread(compiler_t, compiler2_object(i), _c2_compile_queue, _compilers[1], THREAD);
1267 if (ct == nullptr) break;
1268 _compilers[1]->set_num_compiler_threads(i + 1);
1269 if (trace_compiler_threads()) {
1270 ResourceMark rm;
1271 ThreadsListHandle tlh; // name() depends on the TLH.
1272 assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
1273 stringStream msg;
1274 msg.print("Added compiler thread %s (free memory: %dMB, available non-profiled code cache: %dMB)",
1275 ct->name(), (int)(free_memory/M), (int)(available_cc_np/M));
1276 print_compiler_threads(msg);
1277 }
1278 }
1279 }
1280
1281 if (_c1_compile_queue != nullptr) {
1282 old_c1_count = get_c1_thread_count();
1283 new_c1_count = MIN4(_c1_count,
1284 _c1_compile_queue->size() / c1_tasks_per_thread,
1285 (int)(free_memory / (100*M)),
1286 (int)(available_cc_p / (128*K)));
1287
1288 for (int i = old_c1_count; i < new_c1_count; i++) {
1289 JavaThread *ct = make_thread(compiler_t, compiler1_object(i), _c1_compile_queue, _compilers[0], THREAD);
1290 if (ct == nullptr) break;
1291 _compilers[0]->set_num_compiler_threads(i + 1);
1292 if (trace_compiler_threads()) {
1293 ResourceMark rm;
1294 ThreadsListHandle tlh; // name() depends on the TLH.
1295 assert(tlh.includes(ct), "ct=" INTPTR_FORMAT " exited unexpectedly.", p2i(ct));
1296 stringStream msg;
1297 msg.print("Added compiler thread %s (free memory: %dMB, available profiled code cache: %dMB)",
1298 ct->name(), (int)(free_memory/M), (int)(available_cc_p/M));
1299 print_compiler_threads(msg);
1300 }
1301 }
1302 }
1303
1304 CompileThread_lock->unlock();
1305 }
1306
1307
1308 /**
 * Set the methods on the stack as on_stack so that RedefineClasses doesn't
1310 * reclaim them. This method is executed at a safepoint.
1311 */
1312 void CompileBroker::mark_on_stack() {
1313 assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
1314 // Since we are at a safepoint, we do not need a lock to access
1315 // the compile queues.
1316 if (_c2_compile_queue != nullptr) {
1317 _c2_compile_queue->mark_on_stack();
1318 }
1319 if (_c1_compile_queue != nullptr) {
1320 _c1_compile_queue->mark_on_stack();
1321 }
1322 if (_ac1_compile_queue != nullptr) {
1323 _ac1_compile_queue->mark_on_stack();
1324 }
1325 if (_ac2_compile_queue != nullptr) {
1326 _ac2_compile_queue->mark_on_stack();
1327 }
1328 }
1329
1330 // ------------------------------------------------------------------
// CompileBroker::compile_method_base
1332 //
1333 // Request compilation of a method.
1334 void CompileBroker::compile_method_base(const methodHandle& method,
1335 int osr_bci,
1336 int comp_level,
1337 int hot_count,
1338 CompileTask::CompileReason compile_reason,
1339 bool requires_online_compilation,
1340 bool blocking,
1341 Thread* thread) {
1342 guarantee(!method->is_abstract(), "cannot compile abstract methods");
1343 assert(method->method_holder()->is_instance_klass(),
1344 "sanity check");
1345 assert(!method->method_holder()->is_not_initialized() ||
1346 compile_reason == CompileTask::Reason_Preload ||
1347 compile_reason == CompileTask::Reason_Precompile ||
1348 compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized");
1349 assert(!method->is_method_handle_intrinsic(), "do not enqueue these guys");
1350
1351 if (CIPrintRequests) {
1352 tty->print("request: ");
1353 method->print_short_name(tty);
1354 if (osr_bci != InvocationEntryBci) {
1355 tty->print(" osr_bci: %d", osr_bci);
1356 }
1357 tty->print(" level: %d comment: %s count: %d", comp_level, CompileTask::reason_name(compile_reason), hot_count);
1358 if (hot_count > 0) {
1359 tty->print(" hot: yes");
1360 }
1361 tty->cr();
1362 }
1363
1364 // A request has been made for compilation. Before we do any
1365 // real work, check to see if the method has been compiled
1366 // in the meantime with a definitive result.
1367 if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
1368 return;
1369 }
1370
1371 #ifndef PRODUCT
1372 if (osr_bci != -1 && !FLAG_IS_DEFAULT(OSROnlyBCI)) {
1373 if ((OSROnlyBCI > 0) ? (OSROnlyBCI != osr_bci) : (-OSROnlyBCI == osr_bci)) {
1374 // Positive OSROnlyBCI means only compile that bci. Negative means don't compile that BCI.
1375 return;
1376 }
1377 }
1378 #endif
1379
1380 // If this method is already in the compile queue, then
1381 // we do not block the current thread.
1382 if (compilation_is_in_queue(method)) {
1383 // We may want to decay our counter a bit here to prevent
1384 // multiple denied requests for compilation. This is an
1385 // open compilation policy issue. Note: The other possibility,
1386 // in the case that this is a blocking compile request, is to have
1387 // all subsequent blocking requesters wait for completion of
1388 // ongoing compiles. Note that in this case we'll need a protocol
1389 // for freeing the associated compile tasks. [Or we could have
1390 // a single static monitor on which all these waiters sleep.]
1391 return;
1392 }
1393
1394 // Tiered policy requires MethodCounters to exist before adding a method to
1395 // the queue. Create if we don't have them yet.
1396 if (compile_reason != CompileTask::Reason_Preload) {
1397 method->get_method_counters(thread);
1398 }
1399
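  // Prefer loading AOT-compiled code when a usable cache entry exists; otherwise request a
  // normal online (JIT) compilation and pick the queue accordingly.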
1400 AOTCodeEntry* aot_code_entry = find_aot_code_entry(method, osr_bci, comp_level, compile_reason, requires_online_compilation);
1401 bool is_aot = (aot_code_entry != nullptr);
1402 requires_online_compilation = !is_aot; // Request JIT compilation
1403
1404 // Outputs from the following MutexLocker block:
1405 CompileTask* task = nullptr;
1406 CompileQueue* queue = compile_queue(comp_level, is_aot);
1407
1408 // Acquire our lock.
1409 {
1410 ConditionalMutexLocker locker(thread, queue->lock(), !UseLockFreeCompileQueues);
1411
1412 // Make sure the method has not slipped into the queues since
1413 // last we checked; note that those checks were "fast bail-outs".
1414 // Here we need to be more careful, see 14012000 below.
1415 if (compilation_is_in_queue(method)) {
1416 return;
1417 }
1418
1419 // We need to check again to see if the compilation has
1420 // completed. A previous compilation may have registered
1421 // some result.
1422 if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
1423 return;
1424 }
1425
1426 // We now know that this compilation is not pending, complete,
1427 // or prohibited. Assign a compile_id to this compilation
1428 // and check to see if it is in our [Start..Stop) range.
1429 int compile_id = assign_compile_id(method, osr_bci);
1430 if (compile_id == 0) {
1431 // The compilation falls outside the allowed range.
1432 return;
1433 }
1434
1435 #if INCLUDE_JVMCI
1436 if (UseJVMCICompiler && blocking) {
1437 // Don't allow blocking compiles for requests triggered by JVMCI.
1438 if (thread->is_Compiler_thread()) {
1439 blocking = false;
1440 }
1441
1442 // In libjvmci, JVMCI initialization should not deadlock with other threads
1443 if (!UseJVMCINativeLibrary) {
1444 // Don't allow blocking compiles if inside a class initializer or while performing class loading
1445 vframeStream vfst(JavaThread::cast(thread));
1446 for (; !vfst.at_end(); vfst.next()) {
1447 if (vfst.method()->is_static_initializer() ||
1448 (vfst.method()->method_holder()->is_subclass_of(vmClasses::ClassLoader_klass()) &&
1449 vfst.method()->name() == vmSymbols::loadClass_name())) {
1450 blocking = false;
1451 break;
1452 }
1453 }
1454
1455 // Don't allow blocking compilation requests to JVMCI
1456 // if JVMCI itself is not yet initialized
1457 if (!JVMCI::is_compiler_initialized() && compiler(comp_level)->is_jvmci()) {
1458 blocking = false;
1459 }
1460 }
1461
1462 // Don't allow blocking compilation requests if we are in JVMCIRuntime::shutdown
1463 // to avoid deadlock between compiler thread(s) and threads run at shutdown
1464 // such as the DestroyJavaVM thread.
1465 if (JVMCI::in_shutdown()) {
1466 blocking = false;
1467 }
1468 }
1469 #endif // INCLUDE_JVMCI
1470
1471 // We will enter the compilation in the queue.
1472 // 14012000: Note that this sets the queued_for_compile bits in
1473 // the target method. We can now reason that a method cannot be
1474 // queued for compilation more than once, as follows:
1475 // Before a thread queues a task for compilation, it first acquires
1476 // the compile queue lock, then checks if the method's queued bits
    // are set or it has already been compiled. Thus there cannot be two
1478 // instances of a compilation task for the same method on the
1479 // compilation queue. Consider now the case where the compilation
1480 // thread has already removed a task for that method from the queue
1481 // and is in the midst of compiling it. In this case, the
1482 // queued_for_compile bits must be set in the method (and these
1483 // will be visible to the current thread, since the bits were set
    // under protection of the compile queue lock, which we hold now).
1485 // When the compilation completes, the compiler thread first sets
1486 // the compilation result and then clears the queued_for_compile
    // bits. Neither of these actions is protected by a barrier (or done
1488 // under the protection of a lock), so the only guarantee we have
1489 // (on machines with TSO (Total Store Order)) is that these values
1490 // will update in that order. As a result, the only combinations of
1491 // these bits that the current thread will see are, in temporal order:
1492 // <RESULT, QUEUE> :
1493 // <0, 1> : in compile queue, but not yet compiled
1494 // <1, 1> : compiled but queue bit not cleared
1495 // <1, 0> : compiled and queue bit cleared
1496 // Because we first check the queue bits then check the result bits,
1497 // we are assured that we cannot introduce a duplicate task.
1498 // Note that if we did the tests in the reverse order (i.e. check
1499 // result then check queued bit), we could get the result bit before
1500 // the compilation completed, and the queue bit after the compilation
1501 // completed, and end up introducing a "duplicate" (redundant) task.
1502 // In that case, the compiler thread should first check if a method
1503 // has already been compiled before trying to compile it.
1504 // NOTE: in the event that there are multiple compiler threads and
1505 // there is de-optimization/recompilation, things will get hairy,
1506 // and in that case it's best to protect both the testing (here) of
1507 // these bits, and their updating (here and elsewhere) under a
1508 // common lock.
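// An illustrative pseudocode sketch of the argument above (not the actual
// locking code); R is a requesting thread, C a compiler thread:
//
//   R: lock(compile queue); if (!queued && not compiled) { set queued bit; enqueue task; } unlock;
//   C: dequeue task; compile; publish result; clear queued bit;
//
// Since R reads the queued bit before the result, while C (on TSO) publishes the
// result before clearing the queued bit, R can never observe "not queued" and
// "not compiled" for a compilation that is still in flight, and so never
// enqueues a duplicate task.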
1509 task = create_compile_task(queue,
1510 compile_id, method,
1511 osr_bci, comp_level,
1512 hot_count, aot_code_entry, compile_reason,
1513 requires_online_compilation, blocking);
1514
1515 if (task->is_aot_load() && (_ac_count > 0)) {
1516 // Put it on AOT code caching queue
1517 queue = is_c1_compile(comp_level) ? _ac1_compile_queue : _ac2_compile_queue;
1518 }
1519
1520 if (UseLockFreeCompileQueues) {
1521 assert(queue->lock()->owned_by_self() == false, "");
1522 queue->add_pending(task);
1523 } else {
1524 queue->add(task);
1525 }
1526 }
1527
1528 if (blocking) {
1529 wait_for_completion(task);
1530 }
1531 }
1532
1533 AOTCodeEntry* CompileBroker::find_aot_code_entry(const methodHandle& method, int osr_bci, int comp_level,
1534 CompileTask::CompileReason compile_reason,
1535 bool requires_online_compilation) {
1536 if (requires_online_compilation || compile_reason == CompileTask::Reason_Whitebox) {
1537 return nullptr; // Need normal JIT compilation
1538 }
1539 AOTCodeEntry* aot_code_entry = nullptr;
1540 if (osr_bci == InvocationEntryBci && AOTCodeCache::is_using_code()) {
1541 // Check for AOT preload code first.
1542 if (compile_reason == CompileTask::Reason_Preload) {
1543 aot_code_entry = method->aot_code_entry();
1544 assert(aot_code_entry != nullptr && aot_code_entry->for_preload(), "sanity");
1545 } else {
1546 aot_code_entry = AOTCodeCache::find_code_entry(method, comp_level);
1547 }
1548 }
1549 return aot_code_entry;
1550 }
1551
1552 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
1553 int comp_level,
1554 int hot_count,
1555 bool requires_online_compilation,
1556 CompileTask::CompileReason compile_reason,
1557 TRAPS) {
1558 // Do nothing if the CompileBroker is not initialized or compiles are submitted at level none
1559 if (!_initialized || comp_level == CompLevel_none) {
1560 return nullptr;
1561 }
1562
1563 AbstractCompiler *comp = CompileBroker::compiler(comp_level);
1564 assert(comp != nullptr, "Ensure we have a compiler");
1565
1566 #if INCLUDE_JVMCI
1567 if (comp->is_jvmci() && !JVMCI::can_initialize_JVMCI()) {
1568 // JVMCI compilation is not yet initializable.
1569 return nullptr;
1570 }
1571 #endif
1572
1573 DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, comp);
1574 // CompileBroker::compile_method can trap and can have a pending async exception.
1575 nmethod* nm = CompileBroker::compile_method(method, osr_bci, comp_level, hot_count, requires_online_compilation, compile_reason, directive, THREAD);
1576 DirectivesStack::release(directive);
1577 return nm;
1578 }
1579
1580 nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
1581 int comp_level,
1582 int hot_count,
1583 bool requires_online_compilation,
1584 CompileTask::CompileReason compile_reason,
1585 DirectiveSet* directive,
1586 TRAPS) {
1587
1588 // make sure arguments make sense
1589 assert(method->method_holder()->is_instance_klass(), "not an instance method");
1590 assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
1591 assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
1592 assert(!method->method_holder()->is_not_initialized() ||
1593 compile_reason == CompileTask::Reason_Preload ||
1594 compile_reason == CompileTask::Reason_Precompile ||
1595 compile_reason == CompileTask::Reason_PrecompileForPreload, "method holder must be initialized");
1596 // return quickly if possible
1597 bool aot_compilation = (PrecompileCode && PrecompileOnlyAndExit) ||
1598 CDSConfig::is_dumping_aot_code();
1599 if (aot_compilation && !CompileTask::reason_is_precompile(compile_reason)) {
1600 // Skip normal compilations when compiling AOT code
1601 return nullptr;
1602 }
1603
1604 // Make sure that the compilation
1605 // isn't prohibited in a straightforward way.
1606 AbstractCompiler* comp = CompileBroker::compiler(comp_level);
1607 if (comp == nullptr || compilation_is_prohibited(method, osr_bci, comp_level, directive->ExcludeOption)) {
1608 return nullptr;
1609 }
1610
1611 if (osr_bci == InvocationEntryBci) {
1612 // standard compilation
1613 nmethod* method_code = method->code();
1614 if (method_code != nullptr) {
1615 if (compilation_is_complete(method(), osr_bci, comp_level, requires_online_compilation, compile_reason)) {
1616 return method_code;
1617 }
1618 }
1619 if (method->is_not_compilable(comp_level)) {
1620 return nullptr;
1621 }
1622 } else {
1623 // osr compilation
1624 // We accept a higher level osr method
1625 nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
1626 if (nm != nullptr) return nm;
1627 if (method->is_not_osr_compilable(comp_level)) return nullptr;
1628 }
1629
1630 assert(!HAS_PENDING_EXCEPTION, "No exception should be present");
1631 // some prerequisites that are compiler specific
1632 if (compile_reason != CompileTask::Reason_Preload &&
1633 !CompileTask::reason_is_precompile(compile_reason) &&
1634 (comp->is_c2() || comp->is_jvmci())) {
1635 InternalOOMEMark iom(THREAD);
1636 method->constants()->resolve_string_constants(CHECK_AND_CLEAR_NONASYNC_NULL);
1637 // Resolve all classes seen in the signature of the method
1638 // we are compiling.
1639 Method::load_signature_classes(method, CHECK_AND_CLEAR_NONASYNC_NULL);
1640 }
1641
1642 // If the method is native, do the lookup in the thread requesting
1643 // the compilation. Native lookups can load code, which is not
1644 // permitted during compilation.
1645 //
1646 // Note: A native method implies non-osr compilation which is
1647 // checked with an assertion at the entry of this method.
1648 if (method->is_native() && !method->is_method_handle_intrinsic()) {
1649 address adr = NativeLookup::lookup(method, THREAD);
1650 if (HAS_PENDING_EXCEPTION) {
1651 // In case of an exception looking up the method, we just forget
1652 // about it. The interpreter will kick in and throw the exception.
1653 method->set_not_compilable("NativeLookup::lookup failed"); // implies is_not_osr_compilable()
1654 CLEAR_PENDING_EXCEPTION;
1655 return nullptr;
1656 }
1657 assert(method->has_native_function(), "must have native code by now");
1658 }
1659
1660 // RedefineClasses() has replaced this method; just return
1661 if (method->is_old()) {
1662 return nullptr;
1663 }
1664
1665 // JVMTI -- post_compile_event requires jmethod_id() that may require
1666 // a lock the compiling thread cannot acquire. Prefetch it here.
1667 if (JvmtiExport::should_post_compiled_method_load()) {
1668 method->jmethod_id();
1669 }
1670
1671 // do the compilation
1672 if (method->is_native()) {
1673 if (!PreferInterpreterNativeStubs || method->is_method_handle_intrinsic()) {
1674 // To properly handle the appendix argument for out-of-line calls we are using a small trampoline that
1675 // pops off the appendix argument and jumps to the target (see gen_special_dispatch in SharedRuntime).
1676 //
1677 // Since normal compiled-to-compiled calls are not able to handle such a thing we MUST generate an adapter
1678 // in this case. If we can't generate one and use it we can not execute the out-of-line method handle calls.
1679 AdapterHandlerLibrary::create_native_wrapper(method);
1680 } else {
1681 return nullptr;
1682 }
1683 } else {
1684 // If the compiler is shut off due to the code cache getting full,
1685 // fail out now so blocking compiles don't hang the Java thread
1686 if (!should_compile_new_jobs()) {
1687 return nullptr;
1688 }
1689 bool is_blocking = ReplayCompiles ||
1690 !directive->BackgroundCompilationOption ||
1691 (PreloadBlocking && (compile_reason == CompileTask::Reason_Preload));
1692 compile_method_base(method, osr_bci, comp_level, hot_count, compile_reason, requires_online_compilation, is_blocking, THREAD);
1693 }
1694
1695 // return requested nmethod
1696 // We accept a higher level osr method
1697 if (osr_bci == InvocationEntryBci) {
1698 return method->code();
1699 }
1700 return method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
1701 }
1702
1703
1704 // ------------------------------------------------------------------
1705 // CompileBroker::compilation_is_complete
1706 //
1707 // See if compilation of this method is already complete.
1708 bool CompileBroker::compilation_is_complete(Method* method,
1709 int osr_bci,
1710 int comp_level,
1711 bool online_only,
1712 CompileTask::CompileReason compile_reason) {
1713 if (compile_reason == CompileTask::Reason_Precompile ||
1714 compile_reason == CompileTask::Reason_PrecompileForPreload) {
1715 return false; // FIXME: any restrictions?
1716 }
1717 bool is_osr = (osr_bci != standard_entry_bci);
1718 if (is_osr) {
1719 if (method->is_not_osr_compilable(comp_level)) {
1720 return true;
1721 } else {
1722 nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true);
1723 return (result != nullptr);
1724 }
1725 } else {
1726 if (method->is_not_compilable(comp_level)) {
1727 return true;
1728 } else {
1729 nmethod* result = method->code();
1730 if (result == nullptr) {
1731 return false;
1732 }
1733 if (online_only && result->is_aot()) {
1734 return false;
1735 }
1736 bool same_level = (comp_level == result->comp_level());
1737 if (result->preloaded() || result->has_clinit_barriers()) {
1738 return !same_level; // Allow replacing preloaded code with new code of the same level
1739 }
1740 return same_level;
1741 }
1742 }
1743 }
1744
1745
1746 /**
1747 * See if this compilation is already requested.
1748 *
1749 * Implementation note: there is only a single "is in queue" bit
1750 * for each method. This means that the check below is overly
1751 * conservative in the sense that an osr compilation in the queue
1752 * will block a normal compilation from entering the queue (and vice
1753 * versa). This can be remedied by a full queue search to disambiguate
1754 * cases. If it is deemed profitable, this may be done.
1755 */
1756 bool CompileBroker::compilation_is_in_queue(const methodHandle& method) {
1757 return method->queued_for_compilation();
1758 }
1759
1760 // ------------------------------------------------------------------
1761 // CompileBroker::compilation_is_prohibited
1762 //
1763 // See if this compilation is not allowed.
1764 bool CompileBroker::compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded) {
1765 bool is_native = method->is_native();
1766 // Some compilers may not support the compilation of natives.
1767 AbstractCompiler *comp = compiler(comp_level);
1768 if (is_native && (!CICompileNatives || comp == nullptr)) {
1769 method->set_not_compilable_quietly("native methods not supported", comp_level);
1770 return true;
1771 }
1772
1773 bool is_osr = (osr_bci != standard_entry_bci);
1774 // Some compilers may not support on stack replacement.
1775 if (is_osr && (!CICompileOSR || comp == nullptr)) {
1776 method->set_not_osr_compilable("OSR not supported", comp_level);
1777 return true;
1778 }
1779
1780 // The method may be explicitly excluded by the user.
1781 double scale;
1782 if (excluded || (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, scale) && scale == 0)) {
1783 bool quietly = CompilerOracle::be_quiet();
1784 if (PrintCompilation && !quietly) {
1785 // This does not happen quietly...
1786 ResourceMark rm;
1787 tty->print("### Excluding %s:%s",
1788 method->is_native() ? "generation of native wrapper" : "compile",
1789 (method->is_static() ? " static" : ""));
1790 method->print_short_name(tty);
1791 tty->cr();
1792 }
1793 method->set_not_compilable("excluded by CompileCommand", comp_level, !quietly);
1794 }
1795
1796 return false;
1797 }
1798
1799 /**
1800 * Generate serialized IDs for compilation requests. If certain debugging flags are used
1801 * and the ID is not within the specified range, the method is not compiled and 0 is returned.
1802 * The function also allows generating separate compilation IDs for OSR compilations.
1803 */
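// A usage sketch for the debug-build flags mentioned above (the numbers are
// hypothetical): running with -XX:CIStart=100 -XX:CIStop=200 compiles only
// requests whose assigned id falls in [100, 200); requests outside the range
// are marked not compilable and 0 is returned. With -XX:+CICountOSR, OSR
// requests draw from a separate id counter checked against CIStartOSR/CIStopOSR,
// and with -XX:+CICountNative native wrappers use their own counter (they are
// always generated, regardless of any range).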
1804 int CompileBroker::assign_compile_id(const methodHandle& method, int osr_bci) {
1805 #ifdef ASSERT
1806 bool is_osr = (osr_bci != standard_entry_bci);
1807 int id;
1808 if (method->is_native()) {
1809 assert(!is_osr, "can't be osr");
1810 // Adapters, native wrappers and method handle intrinsics
1811 // should always be generated.
1812 return AtomicAccess::add(CICountNative ? &_native_compilation_id : &_compilation_id, 1);
1813 } else if (CICountOSR && is_osr) {
1814 id = AtomicAccess::add(&_osr_compilation_id, 1);
1815 if (CIStartOSR <= id && id < CIStopOSR) {
1816 return id;
1817 }
1818 } else {
1819 id = AtomicAccess::add(&_compilation_id, 1);
1820 if (CIStart <= id && id < CIStop) {
1821 return id;
1822 }
1823 }
1824
1825 // Method was not in the appropriate compilation range.
1826 method->set_not_compilable_quietly("Not in requested compile id range");
1827 return 0;
1828 #else
1829 // CICountOSR is a develop flag and set to 'false' by default. In a product build,
1830 // only _compilation_id is incremented.
1831 return AtomicAccess::add(&_compilation_id, 1);
1832 #endif
1833 }
1834
1835 // ------------------------------------------------------------------
1836 // CompileBroker::assign_compile_id_unlocked
1837 //
1838 // Public wrapper for assign_compile_id that acquires the needed locks
1839 int CompileBroker::assign_compile_id_unlocked(Thread* thread, const methodHandle& method, int osr_bci) {
1840 return assign_compile_id(method, osr_bci);
1841 }
1842
1843 // ------------------------------------------------------------------
1844 // CompileBroker::create_compile_task
1845 //
1846 // Create a CompileTask object representing the current request for
1847 // compilation. Add this task to the queue.
1848 CompileTask* CompileBroker::create_compile_task(CompileQueue* queue,
1849 int compile_id,
1850 const methodHandle& method,
1851 int osr_bci,
1852 int comp_level,
1853 int hot_count,
1854 AOTCodeEntry* aot_code_entry,
1855 CompileTask::CompileReason compile_reason,
1856 bool requires_online_compilation,
1857 bool blocking) {
1858 CompileTask* new_task = new CompileTask(compile_id, method, osr_bci, comp_level,
1859 hot_count, aot_code_entry, compile_reason, queue,
1860 requires_online_compilation, blocking);
1861 return new_task;
1862 }
1863
1864 #if INCLUDE_JVMCI
1865 // The number of milliseconds to wait before checking if
1866 // JVMCI compilation has made progress.
1867 static const long JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE = 1000;
1868
1869 // The number of JVMCI compilation progress checks that must fail
1870 // before unblocking a thread waiting for a blocking compilation.
1871 static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10;
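// With the default values above, a waiting thread gives up after roughly
// 10 consecutive 1000 ms checks without observed progress, i.e. about
// 10 seconds of stalled JVMCI compilation.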
1872
1873 /**
1874 * Waits for a JVMCI compiler to complete a given task. This thread
1875 * waits until either the task completes or it sees no JVMCI compilation
1876 * progress for N consecutive milliseconds where N is
1877 * JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE *
1878 * JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS.
1879 *
1880 * @return true if this thread needs to delete the task
1881 */
1882 bool CompileBroker::wait_for_jvmci_completion(JVMCICompiler* jvmci, CompileTask* task, JavaThread* thread) {
1883 assert(UseJVMCICompiler, "sanity");
1884 MonitorLocker ml(thread, CompileTaskWait_lock);
1885 int progress_wait_attempts = 0;
1886 jint thread_jvmci_compilation_ticks = 0;
1887 jint global_jvmci_compilation_ticks = jvmci->global_compilation_ticks();
1888 while (!task->is_complete() && !is_compilation_disabled_forever() &&
1889 ml.wait(JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE)) {
1890 JVMCICompileState* jvmci_compile_state = task->blocking_jvmci_compile_state();
1891
1892 bool progress;
1893 if (jvmci_compile_state != nullptr) {
1894 jint ticks = jvmci_compile_state->compilation_ticks();
1895 progress = (ticks - thread_jvmci_compilation_ticks) != 0;
1896 JVMCI_event_1("waiting on compilation %d [ticks=%d]", task->compile_id(), ticks);
1897 thread_jvmci_compilation_ticks = ticks;
1898 } else {
1899 // Still waiting on JVMCI compiler queue. This thread may be holding a lock
1900 // that all JVMCI compiler threads are blocked on. We use the global JVMCI
1901 // compilation ticks to determine whether JVMCI compilation
1902 // is still making progress through the JVMCI compiler queue.
1903 jint ticks = jvmci->global_compilation_ticks();
1904 progress = (ticks - global_jvmci_compilation_ticks) != 0;
1905 JVMCI_event_1("waiting on compilation %d to be queued [ticks=%d]", task->compile_id(), ticks);
1906 global_jvmci_compilation_ticks = ticks;
1907 }
1908
1909 if (!progress) {
1910 if (++progress_wait_attempts == JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS) {
1911 if (PrintCompilation) {
1912 task->print(tty, "wait for blocking compilation timed out");
1913 }
1914 JVMCI_event_1("waiting on compilation %d timed out", task->compile_id());
1915 break;
1916 }
1917 } else {
1918 progress_wait_attempts = 0;
1919 }
1920 }
1921 task->clear_waiter();
1922 return task->is_complete();
1923 }
1924 #endif
1925
1926 /**
1927 * Wait for the compilation task to complete.
1928 */
1929 void CompileBroker::wait_for_completion(CompileTask* task) {
1930 if (CIPrintCompileQueue) {
1931 ttyLocker ttyl;
1932 tty->print_cr("BLOCKING FOR COMPILE");
1933 }
1934
1935 assert(task->is_blocking(), "can only wait on blocking task");
1936
1937 JavaThread* thread = JavaThread::current();
1938
1939 methodHandle method(thread, task->method());
1940 bool free_task;
1941 #if INCLUDE_JVMCI
1942 AbstractCompiler* comp = compiler(task->comp_level());
1943 if (!UseJVMCINativeLibrary && comp->is_jvmci() && !task->should_wait_for_compilation()) {
1944 // It may return before compilation is completed.
1945 // Note that libjvmci should not pre-emptively unblock
1946 // a thread waiting for a compilation as it does not call
1947 // Java code and so is not deadlock prone like jarjvmci.
1948 free_task = wait_for_jvmci_completion((JVMCICompiler*) comp, task, thread);
1949 } else
1950 #endif
1951 {
1952 free_task = true;
1953 // Wait until the task is complete or compilation is shut down.
1954 MonitorLocker ml(thread, CompileTaskWait_lock);
1955 while (!task->is_complete() && !is_compilation_disabled_forever()) {
1956 ml.wait();
1957 }
1958 }
1959
1960 // It is harmless to check this status without the lock, because
1961 // completion is a stable property.
1962 if (!task->is_complete()) {
1963 // Task is not complete, likely because we are exiting for compilation
1964 // shutdown. The task can still be reached through the queue, or executed
1965 // by some compiler thread. There is no coordination with either MCQ lock
1966 // holders or compilers, therefore we cannot delete the task.
1967 //
1968 // This will leave task allocated, which leaks it. At this (degraded) point,
1969 // it is less risky to abandon the task, rather than attempting a more
1970 // complicated deletion protocol.
1971 free_task = false;
1972 }
1973
1974 if (free_task) {
1975 assert(task->is_complete(), "Compilation should have completed");
1976 assert(task->next() == nullptr && task->prev() == nullptr,
1977 "Completed task should not be in the queue");
1978
1979 // By convention, the waiter is responsible for deleting a
1980 // blocking CompileTask. Since there is only one waiter ever
1981 // waiting on a CompileTask, we know that no one else will
1982 // be using this CompileTask; we can delete it.
1983 delete task;
1984 }
1985 }
1986
1987 void CompileBroker::wait_for_no_active_tasks() {
1988 CompileTask::wait_for_no_active_tasks();
1989 }
1990
1991 /**
1992 * Initialize compiler thread(s) + compiler object(s). The postcondition
1993 * of this function is that the compiler runtimes are initialized and that
1994 * compiler threads can start compiling.
1995 */
1996 bool CompileBroker::init_compiler_runtime() {
1997 CompilerThread* thread = CompilerThread::current();
1998 AbstractCompiler* comp = thread->compiler();
1999 // Final sanity check - the compiler object must exist
2000 guarantee(comp != nullptr, "Compiler object must exist");
2001
2002 {
2003 // Must switch to native to allocate ci_env
2004 ThreadToNativeFromVM ttn(thread);
2005 ciEnv ci_env((CompileTask*)nullptr);
2006 // Cache Jvmti state
2007 ci_env.cache_jvmti_state();
2008 // Cache DTrace flags
2009 ci_env.cache_dtrace_flags();
2010
2011 // Switch back to VM state to do compiler initialization
2012 ThreadInVMfromNative tv(thread);
2013
2014 comp->initialize();
2015 }
2016
2017 if (comp->is_failed()) {
2018 disable_compilation_forever();
2019 // If compiler initialization failed, no compiler thread that is specific to a
2020 // particular compiler runtime will ever start to compile methods.
2021 shutdown_compiler_runtime(comp, thread);
2022 return false;
2023 }
2024
2025 // C1 specific check
2026 if (comp->is_c1() && (thread->get_buffer_blob() == nullptr)) {
2027 warning("Initialization of %s thread failed (no space to run compilers)", thread->name());
2028 return false;
2029 }
2030
2031 return true;
2032 }
2033
2034 void CompileBroker::free_buffer_blob_if_allocated(CompilerThread* thread) {
2035 BufferBlob* blob = thread->get_buffer_blob();
2036 if (blob != nullptr) {
2037 blob->purge();
2038 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2039 CodeCache::free(blob);
2040 }
2041 }
2042
2043 /**
2044 * If C1 and/or C2 initialization failed, we shut down all compilation.
2045 * We do this to keep things simple. This can be changed if it ever turns
2046 * out to be a problem.
2047 */
2048 void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
2049 free_buffer_blob_if_allocated(thread);
2050
2051 log_info(compilation)("shutdown_compiler_runtime: " INTPTR_FORMAT, p2i(thread));
2052
2053 if (comp->should_perform_shutdown()) {
2054 // There are two reasons for shutting down the compiler
2055 // 1) compiler runtime initialization failed
2056 // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing
2057 warning("%s initialization failed. Shutting down all compilers", comp->name());
2058
2059 // Only one thread per compiler runtime object enters here
2060 // Set state to shut down
2061 comp->set_shut_down();
2062
2063 // Delete all queued compilation tasks to make compiler threads exit faster.
2064 if (_c1_compile_queue != nullptr) {
2065 _c1_compile_queue->delete_all();
2066 }
2067
2068 if (_c2_compile_queue != nullptr) {
2069 _c2_compile_queue->delete_all();
2070 }
2071
2072 // Set flags so that we continue execution with using interpreter only.
2073 UseCompiler = false;
2074 UseInterpreter = true;
2075
2076 // We could delete compiler runtimes also. However, there are references to
2077 // the compiler runtime(s) (e.g., nmethod::is_compiled_by_c1()) which would then
2078 // fail. This can be done later if necessary.
2079 }
2080 }
2081
2082 /**
2083 * Helper function to create new or reuse old CompileLog.
2084 */
2085 CompileLog* CompileBroker::get_log(CompilerThread* ct) {
2086 if (!LogCompilation) return nullptr;
2087
2088 AbstractCompiler *compiler = ct->compiler();
2089 bool c1 = compiler->is_c1();
2090 jobject* compiler_objects = c1 ? _compiler1_objects : _compiler2_objects;
2091 assert(compiler_objects != nullptr, "must be initialized at this point");
2092 CompileLog** logs = c1 ? _compiler1_logs : _compiler2_logs;
2093 assert(logs != nullptr, "must be initialized at this point");
2094 int count = c1 ? _c1_count : _c2_count;
2095
2096 if (ct->queue() == _ac1_compile_queue || ct->queue() == _ac2_compile_queue) {
2097 compiler_objects = _ac_objects;
2098 logs = _ac_logs;
2099 count = _ac_count;
2100 }
2101 // Find Compiler number by its threadObj.
2102 oop compiler_obj = ct->threadObj();
2103 int compiler_number = 0;
2104 bool found = false;
2105 for (; compiler_number < count; compiler_number++) {
2106 if (JNIHandles::resolve_non_null(compiler_objects[compiler_number]) == compiler_obj) {
2107 found = true;
2108 break;
2109 }
2110 }
2111 assert(found, "Compiler must exist at this point");
2112
2113 // Determine pointer for this thread's log.
2114 CompileLog** log_ptr = &logs[compiler_number];
2115
2116 // Return old one if it exists.
2117 CompileLog* log = *log_ptr;
2118 if (log != nullptr) {
2119 ct->init_log(log);
2120 return log;
2121 }
2122
2123 // Create a new one and remember it.
2124 init_compiler_thread_log();
2125 log = ct->log();
2126 *log_ptr = log;
2127 return log;
2128 }
2129
2130 // ------------------------------------------------------------------
2131 // CompileBroker::compiler_thread_loop
2132 //
2133 // The main loop run by a CompilerThread.
2134 void CompileBroker::compiler_thread_loop() {
2135 CompilerThread* thread = CompilerThread::current();
2136 CompileQueue* queue = thread->queue();
2137 // For the thread that initializes the ciObjectFactory
2138 // this resource mark holds all the shared objects
2139 ResourceMark rm;
2140
2141 // First thread to get here will initialize the compiler interface
2142
2143 {
2144 ASSERT_IN_VM;
2145 MutexLocker only_one (thread, CompileThread_lock);
2146 if (!ciObjectFactory::is_initialized()) {
2147 ciObjectFactory::initialize();
2148 }
2149 }
2150
2151 // Open a log.
2152 CompileLog* log = get_log(thread);
2153 if (log != nullptr) {
2154 log->begin_elem("start_compile_thread name='%s' thread='%zu' process='%d'",
2155 thread->name(),
2156 os::current_thread_id(),
2157 os::current_process_id());
2158 log->stamp();
2159 log->end_elem();
2160 }
2161
2162 if (!thread->init_compilation_timeout()) {
2163 return;
2164 }
2165
2166 // If compiler thread/runtime initialization fails, exit the compiler thread
2167 if (!init_compiler_runtime()) {
2168 return;
2169 }
2170
2171 thread->start_idle_timer();
2172
2173 // Poll for new compilation tasks as long as the JVM runs. Compilation
2174 // should only be disabled if something went wrong while initializing the
2175 // compiler runtimes. This, in turn, should not happen. The only known case
2176 // when compiler runtime initialization fails is if there is not enough free
2177 // space in the code cache to generate the necessary stubs, etc.
2178 while (!is_compilation_disabled_forever()) {
2179 // We need this HandleMark to avoid leaking VM handles.
2180 HandleMark hm(thread);
2181
2182 RecompilationPolicy::recompilation_step(AOTRecompilationWorkUnitSize, thread);
2183
2184 CompileTask* task = queue->get(thread);
2185 if (task == nullptr) {
2186 if (UseDynamicNumberOfCompilerThreads) {
2187 // Access compiler_count under lock to enforce consistency.
2188 MutexLocker only_one(CompileThread_lock);
2189 if (can_remove(thread, true)) {
2190 if (trace_compiler_threads()) {
2191 ResourceMark rm;
2192 stringStream msg;
2193 msg.print("Removing compiler thread %s after " JLONG_FORMAT " ms idle time",
2194 thread->name(), thread->idle_time_millis());
2195 print_compiler_threads(msg);
2196 }
2197
2198 // Notify compiler that the compiler thread is about to stop
2199 thread->compiler()->stopping_compiler_thread(thread);
2200
2201 free_buffer_blob_if_allocated(thread);
2202 return; // Stop this thread.
2203 }
2204 }
2205 } else {
2206 // Assign the task to the current thread. Mark this compilation
2207 // thread as active for the profiler.
2208 // CompileTaskWrapper also keeps the Method* from being deallocated if redefinition
2209 // occurs after fetching the compile task off the queue.
2210 CompileTaskWrapper ctw(task);
2211 methodHandle method(thread, task->method());
2212
2213 // Never compile a method if breakpoints are present in it
2214 if (method()->number_of_breakpoints() == 0) {
2215 // Compile the method.
2216 if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
2217 invoke_compiler_on_method(task);
2218 thread->start_idle_timer();
2219 } else {
2220 // After compilation is disabled, remove remaining methods from queue
2221 method->clear_queued_for_compilation();
2222 method->set_pending_queue_processed(false);
2223 task->set_failure_reason("compilation is disabled");
2224 }
2225 } else {
2226 task->set_failure_reason("breakpoints are present");
2227 }
2228
2229 // Don't use AOT compiler threads for dynamic C1 and C2 thread creation.
2230 if (UseDynamicNumberOfCompilerThreads &&
2231 (queue == _c1_compile_queue || queue == _c2_compile_queue)) {
2232 possibly_add_compiler_threads(thread);
2233 assert(!thread->has_pending_exception(), "should have been handled");
2234 }
2235 }
2236 }
2237
2238 // Shut down compiler runtime
2239 shutdown_compiler_runtime(thread->compiler(), thread);
2240 }
2241
2242 // ------------------------------------------------------------------
2243 // CompileBroker::init_compiler_thread_log
2244 //
2245 // Set up state required by +LogCompilation.
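// The log file is named hs_c<thread_id>_pid<process_id>.log and is created in the
// temporary directory when one is available, otherwise in the current working
// directory (see the fallback loop below).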
2246 void CompileBroker::init_compiler_thread_log() {
2247 CompilerThread* thread = CompilerThread::current();
2248 char file_name[4*K];
2249 FILE* fp = nullptr;
2250 intx thread_id = os::current_thread_id();
2251 for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) {
2252 const char* dir = (try_temp_dir ? os::get_temp_directory() : nullptr);
2253 if (dir == nullptr) {
2254 jio_snprintf(file_name, sizeof(file_name), "hs_c%zu_pid%u.log",
2255 thread_id, os::current_process_id());
2256 } else {
2257 jio_snprintf(file_name, sizeof(file_name),
2258 "%s%shs_c%zu_pid%u.log", dir,
2259 os::file_separator(), thread_id, os::current_process_id());
2260 }
2261
2262 fp = os::fopen(file_name, "wt");
2263 if (fp != nullptr) {
2264 if (LogCompilation && Verbose) {
2265 tty->print_cr("Opening compilation log %s", file_name);
2266 }
2267 CompileLog* log = new(mtCompiler) CompileLog(file_name, fp, thread_id);
2268 if (log == nullptr) {
2269 fclose(fp);
2270 return;
2271 }
2272 thread->init_log(log);
2273
2274 if (xtty != nullptr) {
2275 ttyLocker ttyl;
2276 // Record any per thread log files
2277 xtty->elem("thread_logfile thread='%zd' filename='%s'", thread_id, file_name);
2278 }
2279 return;
2280 }
2281 }
2282 warning("Cannot open log file: %s", file_name);
2283 }
2284
2285 void CompileBroker::log_metaspace_failure() {
2286 const char* message = "some methods may not be compiled because metaspace "
2287 "is out of memory";
2288 if (CompilationLog::log() != nullptr) {
2289 CompilationLog::log()->log_metaspace_failure(message);
2290 }
2291 if (PrintCompilation) {
2292 tty->print_cr("COMPILE PROFILING SKIPPED: %s", message);
2293 }
2294 }
2295
2296
2297 // ------------------------------------------------------------------
2298 // CompileBroker::set_should_block
2299 //
2300 // Set _should_block.
2301 // Call this from the VM, with Threads_lock held and a safepoint requested.
2302 void CompileBroker::set_should_block() {
2303 assert(Threads_lock->owner() == Thread::current(), "must have threads lock");
2304 assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint already");
2305 #ifndef PRODUCT
2306 if (PrintCompilation && (Verbose || WizardMode))
2307 tty->print_cr("notifying compiler thread pool to block");
2308 #endif
2309 _should_block = true;
2310 }
2311
2312 // ------------------------------------------------------------------
2313 // CompileBroker::maybe_block
2314 //
2315 // Call this from the compiler at convenient points, to poll for _should_block.
2316 void CompileBroker::maybe_block() {
2317 if (_should_block) {
2318 #ifndef PRODUCT
2319 if (PrintCompilation && (Verbose || WizardMode))
2320 tty->print_cr("compiler thread " INTPTR_FORMAT " poll detects block request", p2i(Thread::current()));
2321 #endif
2322 // If we are executing a task during the request to block, report the task
2323 // before disappearing.
2324 CompilerThread* thread = CompilerThread::current();
2325 if (thread != nullptr) {
2326 CompileTask* task = thread->task();
2327 if (task != nullptr) {
2328 if (PrintCompilation) {
2329 task->print(tty, "blocked");
2330 }
2331 task->print_ul("blocked");
2332 }
2333 }
2334 // Go to VM state and block for final VM shutdown safepoint.
2335 ThreadInVMfromNative tivfn(JavaThread::current());
2336 assert(false, "Should never unblock from TIVNM entry");
2337 }
2338 }
2339
2340 // wrapper for CodeCache::print_summary()
2341 static void codecache_print(bool detailed)
2342 {
2343 stringStream s;
2344 // Dump code cache into a buffer before locking the tty.
2345 {
2346 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2347 CodeCache::print_summary(&s, detailed);
2348 }
2349 ttyLocker ttyl;
2350 tty->print("%s", s.freeze());
2351 }
2352
2353 // wrapper for CodeCache::print_summary() using outputStream
2354 static void codecache_print(outputStream* out, bool detailed) {
2355 stringStream s;
2356
2357 // Dump code cache into a buffer
2358 {
2359 MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
2360 CodeCache::print_summary(&s, detailed);
2361 }
2362
2363 char* remaining_log = s.as_string();
2364 while (*remaining_log != '\0') {
2365 char* eol = strchr(remaining_log, '\n');
2366 if (eol == nullptr) {
2367 out->print_cr("%s", remaining_log);
2368 remaining_log = remaining_log + strlen(remaining_log);
2369 } else {
2370 *eol = '\0';
2371 out->print_cr("%s", remaining_log);
2372 remaining_log = eol + 1;
2373 }
2374 }
2375 }
2376
2377 void CompileBroker::handle_compile_error(CompilerThread* thread, CompileTask* task, ciEnv* ci_env,
2378 int compilable, const char* failure_reason) {
2379 if (!AbortVMOnCompilationFailure) {
2380 return;
2381 }
2382 if (compilable == ciEnv::MethodCompilable_not_at_tier) {
2383 fatal("Not compilable at tier %d: %s", task->comp_level(), failure_reason);
2384 }
2385 if (compilable == ciEnv::MethodCompilable_never) {
2386 fatal("Never compilable: %s", failure_reason);
2387 }
2388 }
2389
2390 static void post_compilation_event(EventCompilation& event, CompileTask* task) {
2391 assert(task != nullptr, "invariant");
2392 CompilerEvent::CompilationEvent::post(event,
2393 task->compile_id(),
2394 task->compiler()->type(),
2395 task->method(),
2396 task->comp_level(),
2397 task->is_success(),
2398 task->osr_bci() != CompileBroker::standard_entry_bci,
2399 task->nm_total_size(),
2400 task->num_inlined_bytecodes(),
2401 task->arena_bytes());
2402 }
2403
2404 int DirectivesStack::_depth = 0;
2405 CompilerDirectives* DirectivesStack::_top = nullptr;
2406 CompilerDirectives* DirectivesStack::_bottom = nullptr;
2407
2408 // Acquires Compilation_lock and waits for it to be notified
2409 // as long as WhiteBox::compilation_locked is true.
2410 static void whitebox_lock_compilation() {
2411 MonitorLocker locker(Compilation_lock, Mutex::_no_safepoint_check_flag);
2412 while (WhiteBox::compilation_locked) {
2413 locker.wait();
2414 }
2415 }
2416
2417 // ------------------------------------------------------------------
2418 // CompileBroker::invoke_compiler_on_method
2419 //
2420 // Compile a method.
2421 //
2422 void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
2423 task->print_ul();
2424 elapsedTimer time;
2425
2426 DirectiveSet* directive = task->directive();
2427
2428 CompilerThread* thread = CompilerThread::current();
2429 ResourceMark rm(thread);
2430
2431 if (CompilationLog::log() != nullptr) {
2432 CompilationLog::log()->log_compile(thread, task);
2433 }
2434
2435 // Common flags.
2436 int compile_id = task->compile_id();
2437 int osr_bci = task->osr_bci();
2438 bool is_osr = (osr_bci != standard_entry_bci);
2439 bool should_log = (thread->log() != nullptr);
2440 bool should_break = false;
2441 bool should_print_compilation = PrintCompilation || directive->PrintCompilationOption;
2442 const int task_level = task->comp_level();
2443 AbstractCompiler* comp = task->compiler();
2444 {
2445 // create the handle inside its own block so it can't
2446 // accidentally be referenced once the thread transitions to
2447 // native. The NoHandleMark before the transition should catch
2448 // any cases where this occurs in the future.
2449 methodHandle method(thread, task->method());
2450
2451 assert(!method->is_native(), "no longer compile natives");
2452
2453 // Update compile information when using perfdata.
2454 if (UsePerfData) {
2455 update_compile_perf_data(thread, method, is_osr);
2456 }
2457
2458 DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, compiler_name(task_level));
2459 }
2460
2461 should_break = directive->BreakAtCompileOption || task->check_break_at_flags();
2462 if (should_log && !directive->LogOption) {
2463 should_log = false;
2464 }
2465
2466 // Allocate a new set of JNI handles.
2467 JNIHandleMark jhm(thread);
2468 Method* target_handle = task->method();
2469 int compilable = ciEnv::MethodCompilable;
2470 const char* failure_reason = nullptr;
2471 bool failure_reason_on_C_heap = false;
2472 const char* retry_message = nullptr;
2473
2474 #if INCLUDE_JVMCI
2475 if (UseJVMCICompiler && comp != nullptr && comp->is_jvmci()) {
2476 JVMCICompiler* jvmci = (JVMCICompiler*) comp;
2477
2478 TraceTime t1("compilation", &time);
2479 EventCompilation event;
2480 JVMCICompileState compile_state(task, jvmci);
2481 JVMCIRuntime *runtime = nullptr;
2482
2483 if (JVMCI::in_shutdown()) {
2484 failure_reason = "in JVMCI shutdown";
2485 retry_message = "not retryable";
2486 compilable = ciEnv::MethodCompilable_never;
2487 } else if (compile_state.target_method_is_old()) {
2488 // Skip redefined methods
2489 failure_reason = "redefined method";
2490 retry_message = "not retryable";
2491 compilable = ciEnv::MethodCompilable_never;
2492 } else {
2493 JVMCIEnv env(thread, &compile_state, __FILE__, __LINE__);
2494 if (env.init_error() != JNI_OK) {
2495 const char* msg = env.init_error_msg();
2496 failure_reason = os::strdup(err_msg("Error attaching to libjvmci (err: %d, %s)",
2497 env.init_error(), msg == nullptr ? "unknown" : msg), mtJVMCI);
2498 bool reason_on_C_heap = true;
2499 // In case of JNI_ENOMEM, there's a good chance a subsequent attempt to create libjvmci or attach to it
2500 // might succeed. Other errors most likely indicate a non-recoverable error in the JVMCI runtime.
2501 bool retryable = env.init_error() == JNI_ENOMEM;
2502 compile_state.set_failure(retryable, failure_reason, reason_on_C_heap);
2503 }
2504 if (failure_reason == nullptr) {
2505 if (WhiteBoxAPI && WhiteBox::compilation_locked) {
2506 // Must switch to native to block
2507 ThreadToNativeFromVM ttn(thread);
2508 whitebox_lock_compilation();
2509 }
2510 methodHandle method(thread, target_handle);
2511 runtime = env.runtime();
2512 runtime->compile_method(&env, jvmci, method, osr_bci);
2513
2514 failure_reason = compile_state.failure_reason();
2515 failure_reason_on_C_heap = compile_state.failure_reason_on_C_heap();
2516 if (!compile_state.retryable()) {
2517 retry_message = "not retryable";
2518 compilable = ciEnv::MethodCompilable_not_at_tier;
2519 }
2520 if (!task->is_success()) {
2521 assert(failure_reason != nullptr, "must specify failure_reason");
2522 }
2523 }
2524 }
2525 if (!task->is_success() && !JVMCI::in_shutdown()) {
2526 handle_compile_error(thread, task, nullptr, compilable, failure_reason);
2527 }
2528 if (event.should_commit()) {
2529 post_compilation_event(event, task);
2530 }
2531
2532 if (runtime != nullptr) {
2533 runtime->post_compile(thread);
2534 }
2535 } else
2536 #endif // INCLUDE_JVMCI
2537 {
2538 NoHandleMark nhm;
2539 ThreadToNativeFromVM ttn(thread);
2540
2541 ciEnv ci_env(task);
2542 if (should_break) {
2543 ci_env.set_break_at_compile(true);
2544 }
2545 if (should_log) {
2546 ci_env.set_log(thread->log());
2547 }
2548 assert(thread->env() == &ci_env, "set by ci_env");
2549 // The thread->env() field is cleared in ~CompileTaskWrapper.
2550
2551 // Cache Jvmti state
2552 bool method_is_old = ci_env.cache_jvmti_state();
2553
2554 // Skip redefined methods
2555 if (method_is_old) {
2556 ci_env.record_method_not_compilable("redefined method", true);
2557 }
2558
2559 // Cache DTrace flags
2560 ci_env.cache_dtrace_flags();
2561
2562 ciMethod* target = ci_env.get_method_from_handle(target_handle);
2563
2564 TraceTime t1("compilation", &time);
2565 EventCompilation event;
2566
2567 if (comp == nullptr) {
2568 ci_env.record_method_not_compilable("no compiler");
2569 } else if (!ci_env.failing()) {
2570 if (WhiteBoxAPI && WhiteBox::compilation_locked) {
2571 whitebox_lock_compilation();
2572 }
2573 comp->compile_method(&ci_env, target, osr_bci, true, directive);
2574
2575 /* Repeat compilation without installing code for profiling purposes */
2576 int repeat_compilation_count = task->is_aot_load() ? 0 : directive->RepeatCompilationOption;
2577 if (repeat_compilation_count > 0) {
2578 CHeapStringHolder failure_reason;
2579 failure_reason.set(ci_env._failure_reason.get());
2580 while (repeat_compilation_count > 0) {
2581 ResourceMark rm(thread);
2582 task->print_ul("NO CODE INSTALLED");
2583 thread->timeout()->reset();
2584 ci_env._failure_reason.clear();
2585 comp->compile_method(&ci_env, target, osr_bci, false, directive);
2586 repeat_compilation_count--;
2587 }
2588 ci_env._failure_reason.set(failure_reason.get());
2589 }
2590 }
2591
2592
2593 if (!ci_env.failing() && !task->is_success() && !task->is_precompile()) {
2594 assert(ci_env.failure_reason() != nullptr, "expect failure reason");
2595 assert(false, "compiler should always document failure: %s", ci_env.failure_reason());
2596 // The compiler elected, without comment, not to register a result.
2597 // Do not attempt further compilations of this method.
2598 ci_env.record_method_not_compilable("compile failed");
2599 }
2600
2601 // Copy this bit to the enclosing block:
2602 compilable = ci_env.compilable();
2603
2604 if (ci_env.failing()) {
2605 // Duplicate the failure reason string, so that it outlives ciEnv
2606 failure_reason = os::strdup(ci_env.failure_reason(), mtCompiler);
2607 failure_reason_on_C_heap = true;
2608 retry_message = ci_env.retry_message();
2609 ci_env.report_failure(failure_reason);
2610 }
2611
2612 if (ci_env.failing()) {
2613 handle_compile_error(thread, task, &ci_env, compilable, failure_reason);
2614 }
2615 if (event.should_commit()) {
2616 post_compilation_event(event, task);
2617 }
2618 }
2619
2620 if (failure_reason != nullptr) {
2621 task->set_failure_reason(failure_reason, failure_reason_on_C_heap);
2622 if (CompilationLog::log() != nullptr) {
2623 CompilationLog::log()->log_failure(thread, task, failure_reason, retry_message);
2624 }
2625 if (PrintCompilation || directive->PrintCompilationOption) {
2626 FormatBufferResource msg = retry_message != nullptr ?
2627 FormatBufferResource("COMPILE SKIPPED: %s (%s)", failure_reason, retry_message) :
2628 FormatBufferResource("COMPILE SKIPPED: %s", failure_reason);
2629 task->print(tty, msg);
2630 }
2631 }
2632
2633 task->mark_finished(os::elapsed_counter());
2634 DirectivesStack::release(directive);
2635
2636 methodHandle method(thread, task->method());
2637
2638 DTRACE_METHOD_COMPILE_END_PROBE(method, compiler_name(task_level), task->is_success());
2639
2640 collect_statistics(thread, time, task);
2641
2642 if (PrintCompilation && PrintCompilation2) {
2643 tty->print("%7d ", (int) tty->time_stamp().milliseconds()); // print timestamp
2644 tty->print("%4d ", compile_id); // print compilation number
2645 tty->print("%s ", (is_osr ? "%" : (task->is_aot_load() ? (task->preload() ? "P" : "A") : " ")));
2646 if (task->is_success()) {
2647 tty->print("size: %d(%d) ", task->nm_total_size(), task->nm_insts_size());
2648 }
2649 tty->print_cr("time: %d inlined: %d bytes", (int)time.milliseconds(), task->num_inlined_bytecodes());
2650 }
2651
2652 Log(compilation, codecache) log;
2653 if (log.is_debug()) {
2654 LogStream ls(log.debug());
2655 codecache_print(&ls, /* detailed= */ false);
2656 }
2657 if (PrintCodeCacheOnCompilation) {
2658 codecache_print(/* detailed= */ false);
2659 }
2660 // Disable compilation, if required.
2661 switch (compilable) {
2662 case ciEnv::MethodCompilable_never:
2663 if (is_osr)
2664 method->set_not_osr_compilable_quietly("MethodCompilable_never");
2665 else
2666 method->set_not_compilable_quietly("MethodCompilable_never");
2667 break;
2668 case ciEnv::MethodCompilable_not_at_tier:
2669 if (is_osr)
2670 method->set_not_osr_compilable_quietly("MethodCompilable_not_at_tier", task_level);
2671 else
2672 method->set_not_compilable_quietly("MethodCompilable_not_at_tier", task_level);
2673 break;
2674 }
2675
2676 // Note that the queued_for_compilation bits are cleared without
2677 // protection of a mutex. [They were set by the requester thread,
2678 // when adding the task to the compile queue -- at which time the
2679 // compile queue lock was held. Subsequently, we acquired the compile
2680 // queue lock to get this task off the compile queue; thus (to belabour
2681 // the point somewhat) our clearing of the bits must be occurring
2682 // only after the setting of the bits.] See also 14012000 above.
2683 method->clear_queued_for_compilation();
2684 method->set_pending_queue_processed(false);
2685
2686 if (should_print_compilation) {
2687 ResourceMark rm;
2688 task->print_tty();
2689 }
2690 }
2691
2692 /**
2693 * The CodeCache is full. Print warning and disable compilation.
2694 * Schedule code cache cleaning so compilation can continue later.
2695 * This function needs to be called only from CodeCache::allocate(),
2696 * since we currently handle a full code cache uniformly.
2697 */
2698 void CompileBroker::handle_full_code_cache(CodeBlobType code_blob_type) {
2699 UseInterpreter = true;
2700 if (UseCompiler || AlwaysCompileLoopMethods ) {
2701 if (xtty != nullptr) {
2702 stringStream s;
2703 // Dump code cache state into a buffer before locking the tty,
2704 // because log_state() will use locks causing lock conflicts.
2705 CodeCache::log_state(&s);
2706 // Lock to prevent tearing
2707 ttyLocker ttyl;
2708 xtty->begin_elem("code_cache_full");
2709 xtty->print("%s", s.freeze());
2710 xtty->stamp();
2711 xtty->end_elem();
2712 }
2713
2714 #ifndef PRODUCT
2715 if (ExitOnFullCodeCache) {
2716 codecache_print(/* detailed= */ true);
2717 before_exit(JavaThread::current());
2718 exit_globals(); // will delete tty
2719 vm_direct_exit(1);
2720 }
2721 #endif
2722 if (UseCodeCacheFlushing) {
2723 // Since code cache is full, immediately stop new compiles
2724 if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
2725 log_info(codecache)("Code cache is full - disabling compilation");
2726 }
2727 } else {
2728 disable_compilation_forever();
2729 }
2730
2731 CodeCache::report_codemem_full(code_blob_type, should_print_compiler_warning());
2732 }
2733 }
2734
2735 // ------------------------------------------------------------------
2736 // CompileBroker::update_compile_perf_data
2737 //
2738 // Record this compilation for debugging purposes.
2739 void CompileBroker::update_compile_perf_data(CompilerThread* thread, const methodHandle& method, bool is_osr) {
2740 ResourceMark rm;
2741 char* method_name = method->name()->as_C_string();
2742 char current_method[CompilerCounters::cmname_buffer_length];
2743 size_t maxLen = CompilerCounters::cmname_buffer_length;
2744
2745 const char* class_name = method->method_holder()->name()->as_C_string();
2746
2747 size_t s1len = strlen(class_name);
2748 size_t s2len = strlen(method_name);
2749
2750 // check if we need to truncate the string
2751 if (s1len + s2len + 2 > maxLen) {
2752
2753 // the strategy is to lop off the leading characters of the
2754 // class name and the trailing characters of the method name.
2755
2756 if (s2len + 2 > maxLen) {
2757 // lop off the entire class name string, let snprintf handle
2758 // truncation of the method name.
2759 class_name += s1len; // null string
2760 }
2761 else {
2762 // lop off the extra characters from the front of the class name
2763 class_name += ((s1len + s2len + 2) - maxLen);
2764 }
2765 }
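// Hypothetical example of the truncation above, assuming a 64-byte buffer: a
// 70-character class name and a 10-character method name need 70 + 10 + 2 = 82
// bytes (space and terminating null included), so the class name pointer is
// advanced by 82 - 64 = 18 characters and the final string is the last 52
// characters of the class name, a space, and the full method name (63
// characters plus the null).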
2766
2767 jio_snprintf(current_method, maxLen, "%s %s", class_name, method_name);
2768
2769 int last_compile_type = normal_compile;
2770 if (CICountOSR && is_osr) {
2771 last_compile_type = osr_compile;
2772 } else if (CICountNative && method->is_native()) {
2773 last_compile_type = native_compile;
2774 }
2775
2776 CompilerCounters* counters = thread->counters();
2777 counters->set_current_method(current_method);
2778 counters->set_compile_type((jlong) last_compile_type);
2779 }
2780
2781 // ------------------------------------------------------------------
2782 // CompileBroker::collect_statistics
2783 //
2784 // Collect statistics about the compilation.
2785
2786 void CompileBroker::collect_statistics(CompilerThread* thread, elapsedTimer time, CompileTask* task) {
2787 bool success = task->is_success();
2788 methodHandle method (thread, task->method());
2789 int compile_id = task->compile_id();
2790 bool is_osr = (task->osr_bci() != standard_entry_bci);
2791 const int comp_level = task->comp_level();
2792 CompilerCounters* counters = thread->counters();
2793
2794 MutexLocker locker(CompileStatistics_lock);
2795
2796 // _perf variables are production performance counters which are
2797 // updated regardless of the setting of the CITime and CITimeEach flags
2798 //
2799
2800 // account all time, including bailouts and failures in this counter;
2801 // C1 and C2 counters are counting both successful and unsuccessful compiles
2802 _t_total_compilation.add(&time);
2803
2804 // Update compilation times. Used by the implementation of JFR CompilerStatistics
2805 // and java.lang.management.CompilationMXBean.
2806 _perf_total_compilation->inc(time.ticks());
2807 _peak_compilation_time = MAX2(time.milliseconds(), _peak_compilation_time);
2808
2809 if (!success) {
2810 _total_bailout_count++;
2811 if (UsePerfData) {
2812 _perf_last_failed_method->set_value(counters->current_method());
2813 _perf_last_failed_type->set_value(counters->compile_type());
2814 _perf_total_bailout_count->inc();
2815 }
2816 _t_bailedout_compilation.add(&time);
2817
2818 if (CITime || log_is_enabled(Info, init)) {
2819 CompilerStatistics* stats = nullptr;
2820 if (task->is_aot_load()) {
2821 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1);
2822 stats = &_aot_stats_per_level[level];
2823 } else {
2824 stats = &_stats_per_level[comp_level-1];
2825 }
2826 stats->_bailout.update(time, 0);
2827 }
2828 } else if (!task->is_success()) {
2829 if (UsePerfData) {
2830 _perf_last_invalidated_method->set_value(counters->current_method());
2831 _perf_last_invalidated_type->set_value(counters->compile_type());
2832 _perf_total_invalidated_count->inc();
2833 }
2834 _total_invalidated_count++;
2835 _t_invalidated_compilation.add(&time);
2836
2837 if (CITime || log_is_enabled(Info, init)) {
2838 CompilerStatistics* stats = nullptr;
2839 if (task->is_aot_load()) {
2840 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1);
2841 stats = &_aot_stats_per_level[level];
2842 } else {
2843 stats = &_stats_per_level[comp_level-1];
2844 }
2845 stats->_invalidated.update(time, 0);
2846 }
2847 } else {
2848 // Compilation succeeded
2849 if (CITime || log_is_enabled(Info, init)) {
2850 int bytes_compiled = method->code_size() + task->num_inlined_bytecodes();
2851 if (is_osr) {
2852 _t_osr_compilation.add(&time);
2853 _sum_osr_bytes_compiled += bytes_compiled;
2854 } else {
2855 _t_standard_compilation.add(&time);
2856 _sum_standard_bytes_compiled += method->code_size() + task->num_inlined_bytecodes();
2857 }
2858
2859 // Collect statistic per compilation level
2860 if (task->is_aot_load()) {
2861 _aot_stats._standard.update(time, bytes_compiled);
2862 _aot_stats._nmethods_size += task->nm_total_size();
2863 _aot_stats._nmethods_code_size += task->nm_insts_size();
2864 int level = task->preload() ? CompLevel_full_optimization : (comp_level - 1);
2865 CompilerStatistics* stats = &_aot_stats_per_level[level];
2866 stats->_standard.update(time, bytes_compiled);
2867 stats->_nmethods_size += task->nm_total_size();
2868 stats->_nmethods_code_size += task->nm_insts_size();
2869 } else if (comp_level > CompLevel_none && comp_level <= CompLevel_full_optimization) {
2870 CompilerStatistics* stats = &_stats_per_level[comp_level-1];
2871 if (is_osr) {
2872 stats->_osr.update(time, bytes_compiled);
2873 } else {
2874 stats->_standard.update(time, bytes_compiled);
2875 }
2876 stats->_nmethods_size += task->nm_total_size();
2877 stats->_nmethods_code_size += task->nm_insts_size();
2878 } else {
2879 assert(false, "CompilerStatistics object does not exist for compilation level %d", comp_level);
2880 }
2881
2882 // Collect statistic per compiler
2883 AbstractCompiler* comp = task->compiler();
2884 if (comp && !task->is_aot_load()) {
2885 CompilerStatistics* stats = comp->stats();
2886 if (is_osr) {
2887 stats->_osr.update(time, bytes_compiled);
2888 } else {
2889 stats->_standard.update(time, bytes_compiled);
2890 }
2891 stats->_nmethods_size += task->nm_total_size();
2892 stats->_nmethods_code_size += task->nm_insts_size();
2893 } else if (!task->is_aot_load()) { // if (!comp)
2894 assert(false, "Compiler object must exist");
2895 }
2896 }
2897
2898 if (UsePerfData) {
2899 // save the name of the last method compiled
2900 _perf_last_method->set_value(counters->current_method());
2901 _perf_last_compile_type->set_value(counters->compile_type());
2902 _perf_last_compile_size->set_value(method->code_size() +
2903 task->num_inlined_bytecodes());
2904 if (is_osr) {
2905 _perf_osr_compilation->inc(time.ticks());
2906 _perf_sum_osr_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes());
2907 } else {
2908 _perf_standard_compilation->inc(time.ticks());
2909 _perf_sum_standard_bytes_compiled->inc(method->code_size() + task->num_inlined_bytecodes());
2910 }
2911 }
2912
2913 if (CITimeEach) {
2914 double compile_time = time.seconds();
2915 double bytes_per_sec = compile_time == 0.0 ? 0.0 : (double)(method->code_size() + task->num_inlined_bytecodes()) / compile_time;
2916 tty->print_cr("%3d seconds: %6.3f bytes/sec : %f (bytes %d + %d inlined)",
2917 compile_id, compile_time, bytes_per_sec, method->code_size(), task->num_inlined_bytecodes());
2918 }
2919
2920 // Collect counts of successful compilations
2921 _sum_nmethod_size += task->nm_total_size();
2922 _sum_nmethod_code_size += task->nm_insts_size();
2923 _total_compile_count++;
2924
2925 if (UsePerfData) {
2926 _perf_sum_nmethod_size->inc( task->nm_total_size());
2927 _perf_sum_nmethod_code_size->inc(task->nm_insts_size());
2928 _perf_total_compile_count->inc();
2929 }
2930
2931 if (is_osr) {
2932 if (UsePerfData) _perf_total_osr_compile_count->inc();
2933 _total_osr_compile_count++;
2934 } else {
2935 if (UsePerfData) _perf_total_standard_compile_count->inc();
2936 _total_standard_compile_count++;
2937 }
2938 }
2939 // reset the current method for the thread to an empty string
2940 if (UsePerfData) counters->set_current_method("");
2941 }
2942
2943 const char* CompileBroker::compiler_name(int comp_level) {
2944 AbstractCompiler *comp = CompileBroker::compiler(comp_level);
2945 if (comp == nullptr) {
2946 return "no compiler";
2947 } else {
2948 return (comp->name());
2949 }
2950 }
2951
2952 jlong CompileBroker::total_compilation_ticks() {
2953 return _perf_total_compilation != nullptr ? _perf_total_compilation->get_value() : 0;
2954 }
2955
2956 void CompileBroker::log_not_entrant(nmethod* nm) {
2957 _total_not_entrant_count++;
2958 if (CITime || log_is_enabled(Info, init)) {
2959 CompilerStatistics* stats = nullptr;
2960 int level = nm->comp_level();
2961 if (nm->is_aot()) {
2962 if (nm->preloaded()) {
2963 assert(level == CompLevel_full_optimization, "%d", level);
2964 level = CompLevel_full_optimization + 1;
2965 }
2966 stats = &_aot_stats_per_level[level - 1];
2967 } else {
2968 stats = &_stats_per_level[level - 1];
2969 }
2970 stats->_made_not_entrant._count++;
2971 }
2972 }
2973
2974 void CompileBroker::print_times(const char* name, CompilerStatistics* stats) {
2975 tty->print_cr(" %s {speed: %6.3f bytes/s; standard: %6.3f s, %u bytes, %u methods; osr: %6.3f s, %u bytes, %u methods; nmethods_size: %u bytes; nmethods_code_size: %u bytes}",
2976 name, stats->bytes_per_second(),
2977 stats->_standard._time.seconds(), stats->_standard._bytes, stats->_standard._count,
2978 stats->_osr._time.seconds(), stats->_osr._bytes, stats->_osr._count,
2979 stats->_nmethods_size, stats->_nmethods_code_size);
2980 }
2981
2982 static void print_helper(outputStream* st, const char* name, CompilerStatistics::Data data, bool print_time = true) {
2983 if (data._count > 0) {
2984 st->print("; %s: %4u methods", name, data._count);
2985 if (print_time) {
2986 st->print(" (in %.3fs)", data._time.seconds());
2987 }
2988 }
2989 }
2990
static void print_tier_helper(outputStream* st, const char* prefix, int tier, CompilerStatistics* stats) {
  st->print(" %s%d: %5u methods", prefix, tier, stats->_standard._count);
  if (stats->_standard._count > 0) {
    st->print(" (in %.3fs)", stats->_standard._time.seconds());
  }
  print_helper(st, "osr", stats->_osr);
  print_helper(st, "bailout", stats->_bailout);
  print_helper(st, "invalid", stats->_invalidated);
  print_helper(st, "not_entrant", stats->_made_not_entrant, false);
  st->cr();
}

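// Print a one-line summary of a compile queue: the number of compiler threads
// serving it (and how many of them are currently busy with a task), the queue
// length, and a per-tier breakdown of the queued tasks. AOT preload tasks are
// reported as tier CompLevel_full_optimization + 1.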
static void print_queue_info(outputStream* st, CompileQueue* queue) {
  if (queue != nullptr) {
    MutexLocker ml(queue->lock());

    uint total_cnt = 0;
    uint active_cnt = 0;
    for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) {
      guarantee(jt != nullptr, "");
      if (jt->is_Compiler_thread()) {
        CompilerThread* ct = (CompilerThread*)jt;

        guarantee(ct != nullptr, "");
        if (ct->queue() == queue) {
          ++total_cnt;
          CompileTask* task = ct->task();
          if (task != nullptr) {
            ++active_cnt;
          }
        }
      }
    }

    st->print(" %s (%d active / %d total threads): %u tasks",
              queue->name(), active_cnt, total_cnt, queue->size());
    if (queue->size() > 0) {
      uint counts[] = {0, 0, 0, 0, 0}; // T1 ... T5
      for (CompileTask* task = queue->first(); task != nullptr; task = task->next()) {
        int tier = task->comp_level();
        if (task->is_aot_load() && task->preload()) {
          assert(tier == CompLevel_full_optimization, "%d", tier);
          tier = CompLevel_full_optimization + 1;
        }
        counts[tier-1]++;
      }
      st->print(":");
      for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) {
        uint cnt = counts[tier-1];
        if (cnt > 0) {
          st->print(" T%d: %u tasks;", tier, cnt);
        }
      }
    }
    st->cr();

    // for (JavaThread* jt : *ThreadsSMRSupport::get_java_thread_list()) {
    //   guarantee(jt != nullptr, "");
    //   if (jt->is_Compiler_thread()) {
    //     CompilerThread* ct = (CompilerThread*)jt;
    //
    //     guarantee(ct != nullptr, "");
    //     if (ct->queue() == queue) {
    //       ResourceMark rm;
    //       CompileTask* task = ct->task();
    //       st->print(" %s: ", ct->name_raw());
    //       if (task != nullptr) {
    //         task->print(st, nullptr, true /*short_form*/, false /*cr*/);
    //       }
    //       st->cr();
    //     }
    //   }
    // }
  }
}
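
// Print per-tier JIT (and, when AOTCodeCaching is enabled, AOT) compilation
// statistics, followed by the state of each compile queue.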
void CompileBroker::print_statistics_on(outputStream* st) {
  st->print_cr(" Total: %u methods; %u bailouts, %u invalidated, %u non_entrant",
               _total_compile_count, _total_bailout_count, _total_invalidated_count, _total_not_entrant_count);
  for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) {
    print_tier_helper(st, "Tier", tier, &_stats_per_level[tier-1]);
  }
  st->cr();

  if (AOTCodeCaching) {
    for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) {
      if (tier != CompLevel_full_profile) {
        print_tier_helper(st, "AOT Code T", tier, &_aot_stats_per_level[tier - 1]);
      }
    }
    st->cr();
  }

  print_queue_info(st, _c1_compile_queue);
  print_queue_info(st, _c2_compile_queue);
  print_queue_info(st, _ac1_compile_queue);
  print_queue_info(st, _ac2_compile_queue);
}

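// Print accumulated compiler timing statistics. With 'per_compiler', print
// per-compiler and per-tier breakdowns; with 'aggregate', print overall
// totals, compiler-internal timers, and code size summaries.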
void CompileBroker::print_times(bool per_compiler, bool aggregate) {
  if (per_compiler) {
    if (aggregate) {
      tty->cr();
      tty->print_cr("[%dms] Individual compiler times (for compiled methods only)", (int)tty->time_stamp().milliseconds());
      tty->print_cr("------------------------------------------------");
      tty->cr();
    }
    for (unsigned int i = 0; i < sizeof(_compilers) / sizeof(AbstractCompiler*); i++) {
      AbstractCompiler* comp = _compilers[i];
      if (comp != nullptr) {
        print_times(comp->name(), comp->stats());
      }
    }
    if (_aot_stats._standard._count > 0) {
      print_times("SC", &_aot_stats);
    }
    if (aggregate) {
      tty->cr();
      tty->print_cr("Individual compilation Tier times (for compiled methods only)");
      tty->print_cr("------------------------------------------------");
      tty->cr();
    }
    char tier_name[256];
    for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) {
      CompilerStatistics* stats = &_stats_per_level[tier-1];
      os::snprintf_checked(tier_name, sizeof(tier_name), "Tier%d", tier);
      print_times(tier_name, stats);
    }
    for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level() + 1; tier++) {
      CompilerStatistics* stats = &_aot_stats_per_level[tier-1];
      if (stats->_standard._bytes > 0) {
        os::snprintf_checked(tier_name, sizeof(tier_name), "AOT Code T%d", tier);
        print_times(tier_name, stats);
      }
    }
  }

  if (!aggregate) {
    return;
  }

  elapsedTimer standard_compilation = CompileBroker::_t_standard_compilation;
  elapsedTimer osr_compilation = CompileBroker::_t_osr_compilation;
  elapsedTimer total_compilation = CompileBroker::_t_total_compilation;

  uint standard_bytes_compiled = CompileBroker::_sum_standard_bytes_compiled;
  uint osr_bytes_compiled = CompileBroker::_sum_osr_bytes_compiled;

  uint standard_compile_count = CompileBroker::_total_standard_compile_count;
  uint osr_compile_count = CompileBroker::_total_osr_compile_count;
  uint total_compile_count = CompileBroker::_total_compile_count;
  uint total_bailout_count = CompileBroker::_total_bailout_count;
  uint total_invalidated_count = CompileBroker::_total_invalidated_count;

  uint nmethods_code_size = CompileBroker::_sum_nmethod_code_size;
  uint nmethods_size = CompileBroker::_sum_nmethod_size;

  tty->cr();
  tty->print_cr("Accumulated compiler times");
  tty->print_cr("----------------------------------------------------------");
  //0000000000111111111122222222223333333333444444444455555555556666666666
  //0123456789012345678901234567890123456789012345678901234567890123456789
  tty->print_cr(" Total compilation time : %7.3f s", total_compilation.seconds());
  tty->print_cr(" Standard compilation : %7.3f s, Average : %2.3f s",
                standard_compilation.seconds(),
                standard_compile_count == 0 ? 0.0 : standard_compilation.seconds() / standard_compile_count);
  tty->print_cr(" Bailed out compilation : %7.3f s, Average : %2.3f s",
                CompileBroker::_t_bailedout_compilation.seconds(),
                total_bailout_count == 0 ? 0.0 : CompileBroker::_t_bailedout_compilation.seconds() / total_bailout_count);
  tty->print_cr(" On stack replacement : %7.3f s, Average : %2.3f s",
                osr_compilation.seconds(),
                osr_compile_count == 0 ? 0.0 : osr_compilation.seconds() / osr_compile_count);
  tty->print_cr(" Invalidated : %7.3f s, Average : %2.3f s",
                CompileBroker::_t_invalidated_compilation.seconds(),
                total_invalidated_count == 0 ? 0.0 : CompileBroker::_t_invalidated_compilation.seconds() / total_invalidated_count);

  if (AOTCodeCaching) { // Check the flag because the AOT code cache could already be closed
    tty->cr();
    AOTCodeCache::print_timers_on(tty);
  }
  AbstractCompiler* comp = compiler(CompLevel_simple);
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
  comp = compiler(CompLevel_full_optimization);
  if (comp != nullptr) {
    tty->cr();
    comp->print_timers();
  }
#if INCLUDE_JVMCI
  if (EnableJVMCI) {
    JVMCICompiler* jvmci_comp = JVMCICompiler::instance(false, JavaThread::current_or_null());
    if (jvmci_comp != nullptr && jvmci_comp != comp) {
      tty->cr();
      jvmci_comp->print_timers();
    }
  }
#endif

  tty->cr();
  tty->print_cr(" Total compiled methods : %8u methods", total_compile_count);
  tty->print_cr(" Standard compilation : %8u methods", standard_compile_count);
  tty->print_cr(" On stack replacement : %8u methods", osr_compile_count);
  uint tcb = osr_bytes_compiled + standard_bytes_compiled;
  tty->print_cr(" Total compiled bytecodes : %8u bytes", tcb);
  tty->print_cr(" Standard compilation : %8u bytes", standard_bytes_compiled);
  tty->print_cr(" On stack replacement : %8u bytes", osr_bytes_compiled);
  double tcs = total_compilation.seconds();
  uint bps = tcs == 0.0 ? 0 : (uint)(tcb / tcs);
  tty->print_cr(" Average compilation speed : %8u bytes/s", bps);
  tty->cr();
  tty->print_cr(" nmethod code size : %8u bytes", nmethods_code_size);
  tty->print_cr(" nmethod total size : %8u bytes", nmethods_size);
}

// Print general/accumulated JIT information.
void CompileBroker::print_info(outputStream* out) {
  if (out == nullptr) out = tty;
  out->cr();
  out->print_cr("======================");
  out->print_cr(" General JIT info ");
  out->print_cr("======================");
  out->cr();
  out->print_cr(" JIT is : %7s", should_compile_new_jobs() ? "on" : "off");
  out->print_cr(" Compiler threads : %7d", (int)CICompilerCount);
  out->cr();
  out->print_cr("CodeCache overview");
  out->print_cr("--------------------------------------------------------");
  out->cr();
  out->print_cr(" Reserved size : %7zu KB", CodeCache::max_capacity() / K);
  out->print_cr(" Committed size : %7zu KB", CodeCache::capacity() / K);
  out->print_cr(" Unallocated capacity : %7zu KB", CodeCache::unallocated_capacity() / K);
  out->cr();
}

// Note: tty_lock must not be held upon entry to this function.
// Print functions called from here do "micro-locking" on tty_lock.
// That's a tradeoff which keeps important blocks of output together.
// At the same time, continuous tty_lock hold time is kept in check,
// preventing concurrently printing threads from stalling for a long time.
void CompileBroker::print_heapinfo(outputStream* out, const char* function, size_t granularity) {
  TimeStamp ts_total;
  TimeStamp ts_global;
  TimeStamp ts;

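  // Decode the requested analysis function; "all" enables every step.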
  bool allFun = !strcmp(function, "all");
  bool aggregate = !strcmp(function, "aggregate") || !strcmp(function, "analyze") || allFun;
  bool usedSpace = !strcmp(function, "UsedSpace") || allFun;
  bool freeSpace = !strcmp(function, "FreeSpace") || allFun;
  bool methodCount = !strcmp(function, "MethodCount") || allFun;
  bool methodSpace = !strcmp(function, "MethodSpace") || allFun;
  bool methodAge = !strcmp(function, "MethodAge") || allFun;
  bool methodNames = !strcmp(function, "MethodNames") || allFun;
  bool discard = !strcmp(function, "discard") || allFun;

  if (out == nullptr) {
    out = tty;
  }

  if (!(aggregate || usedSpace || freeSpace || methodCount || methodSpace || methodAge || methodNames || discard)) {
    out->print_cr("\n__ CodeHeapStateAnalytics: Function %s is not supported", function);
    out->cr();
    return;
  }

  ts_total.update(); // record starting point

  if (aggregate) {
    print_info(out);
  }

  // We hold the CodeHeapStateAnalytics_lock all the time, from here until we leave this function.
  // That prevents other threads from destroying (making inconsistent) our view on the CodeHeap.
  // When we request individual parts of the analysis via the jcmd interface, it is possible
  // that in between another thread (another jcmd user or the vm running into CodeCache OOM)
  // updated the aggregated data. We will then see a modified, but again consistent, view
  // on the CodeHeap. That's a tolerable tradeoff we have to accept because we can't hold
  // a lock across user interaction.

  // Acquire this lock before Compile_lock and CodeCache_lock: CodeHeapStateAnalytics_lock
  // may be held by a concurrent thread for a long time, and waiting for it while already
  // holding the other locks would extend their hold time unnecessarily.
  ts.update(); // record starting point
  MutexLocker mu0(CodeHeapStateAnalytics_lock, Mutex::_safepoint_check_flag);
  out->print_cr("\n__ CodeHeapStateAnalytics lock wait took %10.3f seconds _________\n", ts.seconds());

  // Holding the CodeCache_lock protects against concurrent alterations of the CodeCache.
  // Unfortunately, such protection is not sufficient:
  // When a new nmethod is created via ciEnv::register_method(), the
  // Compile_lock is taken first. After some initializations,
  // nmethod::new_nmethod() takes over, grabbing the CodeCache_lock
  // immediately (after finalizing the oop references). To lock out concurrent
  // modifiers, we have to grab both locks ourselves, in that same order.
  //
  // If we serve an "allFun" call, it is beneficial to hold CodeCache_lock and Compile_lock
  // for the entire duration of aggregation and printing. That makes sure we see
  // a consistent picture and do not run into issues caused by concurrent alterations.
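  // Resulting lock order: CodeHeapStateAnalytics_lock -> Compile_lock -> CodeCache_lock.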
  bool should_take_Compile_lock   = !SafepointSynchronize::is_at_safepoint() &&
                                    !Compile_lock->owned_by_self();
  bool should_take_CodeCache_lock = !SafepointSynchronize::is_at_safepoint() &&
                                    !CodeCache_lock->owned_by_self();
  bool take_global_lock_1   = allFun && should_take_Compile_lock;
  bool take_global_lock_2   = allFun && should_take_CodeCache_lock;
  bool take_function_lock_1 = !allFun && should_take_Compile_lock;
  bool take_function_lock_2 = !allFun && should_take_CodeCache_lock;
  bool take_global_locks   = take_global_lock_1 || take_global_lock_2;
  bool take_function_locks = take_function_lock_1 || take_function_lock_2;

  ts_global.update(); // record starting point

  ConditionalMutexLocker mu1(Compile_lock, take_global_lock_1, Mutex::_safepoint_check_flag);
  ConditionalMutexLocker mu2(CodeCache_lock, take_global_lock_2, Mutex::_no_safepoint_check_flag);
  if (take_global_locks) {
    out->print_cr("\n__ Compile & CodeCache (global) lock wait took %10.3f seconds _________\n", ts_global.seconds());
    ts_global.update(); // record starting point
  }

  if (aggregate) {
    ts.update(); // record starting point
    ConditionalMutexLocker mu11(Compile_lock, take_function_lock_1, Mutex::_safepoint_check_flag);
    ConditionalMutexLocker mu22(CodeCache_lock, take_function_lock_2, Mutex::_no_safepoint_check_flag);
    if (take_function_locks) {
      out->print_cr("\n__ Compile & CodeCache (function) lock wait took %10.3f seconds _________\n", ts.seconds());
    }

    ts.update(); // record starting point
    CodeCache::aggregate(out, granularity);
    if (take_function_locks) {
      out->print_cr("\n__ Compile & CodeCache (function) lock hold took %10.3f seconds _________\n", ts.seconds());
    }
  }

  if (usedSpace) CodeCache::print_usedSpace(out);
  if (freeSpace) CodeCache::print_freeSpace(out);
  if (methodCount) CodeCache::print_count(out);
  if (methodSpace) CodeCache::print_space(out);
  if (methodAge) CodeCache::print_age(out);
  if (methodNames) {
    if (allFun) {
      // print_names() can only be used safely if the locks have been held continuously
      // since the aggregation began. That is true only for function "all".
      CodeCache::print_names(out);
    } else {
      out->print_cr("\nCodeHeapStateAnalytics: Function 'MethodNames' is only available as part of function 'all'");
    }
  }
  if (discard) CodeCache::discard(out);

  if (take_global_locks) {
    out->print_cr("\n__ Compile & CodeCache (global) lock hold took %10.3f seconds _________\n", ts_global.seconds());
  }
  out->print_cr("\n__ CodeHeapStateAnalytics total duration %10.3f seconds _________\n", ts_total.seconds());
}