/*
 * Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotLinkedClassBulkLoader.hpp"
#include "code/aotCodeCache.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/recompilationPolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/trainingData.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointVerifiers.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

int64_t CompilationPolicy::_start_time = 0;
int CompilationPolicy::_c1_count = 0;
int CompilationPolicy::_c2_count = 0;
int CompilationPolicy::_ac_count = 0;
double CompilationPolicy::_increase_threshold_at_ratio = 0;

CompilationPolicy::TrainingReplayQueue CompilationPolicy::_training_replay_queue;

void compilationPolicy_init() {
  CompilationPolicy::initialize();
}

int CompilationPolicy::compiler_count(CompLevel comp_level) {
  if (is_c1_compile(comp_level)) {
    return c1_count();
  } else if (is_c2_compile(comp_level)) {
    return c2_count();
  }
  return 0;
}

// Returns true if m must be compiled before executing it
// This is intended to force compiles for methods (usually for
// debugging) that would otherwise be interpreted for some reason.
bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
  // Don't allow Xcomp to cause compiles in replay mode
  if (ReplayCompiles) return false;

  if (m->has_compiled_code()) return false;       // already compiled
  if (!can_be_compiled(m, comp_level)) return false;

  return !UseInterpreter ||                                                                        // must compile all methods
         (AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods

void CompilationPolicy::maybe_compile_early(const methodHandle& m, TRAPS) {
  if (m->method_holder()->is_not_initialized()) {
    // 'is_not_initialized' means not only '!is_initialized', but also that
    // initialization has not been started yet ('!being_initialized').
    // Do not force compilation of methods in uninitialized classes.
    return;
  }
  if (!m->is_native() && MethodTrainingData::have_data()) {
    MethodTrainingData* mtd = MethodTrainingData::find_fast(m);
    if (mtd == nullptr) {
      return; // there is no training data recorded for m
    }
    // Consider replacing conservatively compiled AOT Preload code with faster AOT code
    nmethod* nm = m->code();
    bool recompile = (nm != nullptr) && nm->preloaded();
    CompLevel cur_level = static_cast<CompLevel>(m->highest_comp_level());
    CompLevel next_level = trained_transition(m, cur_level, mtd, THREAD);
    if ((next_level != cur_level || recompile) && can_be_compiled(m, next_level) && !CompileBroker::compilation_is_in_queue(m)) {
      bool requires_online_compilation = false;
      CompileTrainingData* ctd = mtd->last_toplevel_compile(next_level);
      if (ctd != nullptr) {
        // Can't load normal AOT code - not all dependencies are ready,
        // request normal compilation
        requires_online_compilation = (ctd->init_deps_left_acquire() > 0);
      }
      if (requires_online_compilation && recompile) {
        // If AOT Preload code is in use now, wait until the dependencies
        // are ready so that normal AOT code can be loaded.
        //
        // FIXME: We may never (or only after a long time) get all dependencies
        // ready to replace the AOT Preload code. Consider using the elapsed time
        // and the number of dependencies left to decide when to allow normal JIT
        // compilation for the replacement.
        return;
      }
      if (PrintTieredEvents) {
        print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, next_level);
      }
      CompileBroker::compile_method(m, InvocationEntryBci, next_level, 0, requires_online_compilation, CompileTask::Reason_MustBeCompiled, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        CLEAR_PENDING_EXCEPTION;
      }
    }
  }
}

void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
  if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
    // don't force compilation, resolve was on behalf of compiler
    return;
  }
  if (m->method_holder()->is_not_initialized()) {
    // 'is_not_initialized' means not only '!is_initialized', but also that
    // initialization has not been started yet ('!being_initialized')
    // Do not force compilation of methods in uninitialized classes.
    // Note that doing this would throw an assert later,
    // in CompileBroker::compile_method.
    // We sometimes use the link resolver to do reflective lookups
    // even before classes are initialized.
    return;
  }

  if (must_be_compiled(m)) {
    // This path is unusual, mostly used by the '-Xcomp' stress test mode.
    CompLevel level = initial_compile_level(m);
    if (PrintTieredEvents) {
      print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, level);
    }
    // Test AOT code too
    bool requires_online_compilation = false;
    if (TrainingData::have_data()) {
      MethodTrainingData* mtd = MethodTrainingData::find_fast(m);
      if (mtd != nullptr) {
        CompileTrainingData* ctd = mtd->last_toplevel_compile(level);
        if (ctd != nullptr) {
          requires_online_compilation = (ctd->init_deps_left_acquire() > 0);
        }
      }
    }
    CompileBroker::compile_method(m, InvocationEntryBci, level, 0, requires_online_compilation, CompileTask::Reason_MustBeCompiled, THREAD);
  }
}

void CompilationPolicy::replay_training_at_init_impl(InstanceKlass* klass, JavaThread* current) {
  if (!klass->has_init_deps_processed()) {
    ResourceMark rm;
    log_debug(training)("Replay training: %s", klass->external_name());

    KlassTrainingData* ktd = KlassTrainingData::find(klass);
    if (ktd != nullptr) {
      guarantee(ktd->has_holder(), "");
      ktd->notice_fully_initialized(); // sets klass->has_init_deps_processed bit
      assert(klass->has_init_deps_processed(), "");

      if (AOTCompileEagerly) {
        GrowableArray<MethodTrainingData*> mtds;
        ktd->iterate_comp_deps([&](CompileTrainingData* ctd) {
          if (ctd->init_deps_left_acquire() == 0) {
            MethodTrainingData* mtd = ctd->method();
            if (mtd->has_holder()) {
              mtds.push(mtd);
            }
          }
        });
        for (int i = 0; i < mtds.length(); i++) {
          MethodTrainingData* mtd = mtds.at(i);
          const methodHandle mh(current, const_cast<Method*>(mtd->holder()));
          CompilationPolicy::maybe_compile_early(mh, current);
        }
      }
    }
  }
}

void CompilationPolicy::replay_training_at_init(InstanceKlass* klass, JavaThread* current) {
  assert(klass->is_initialized(), "");
  if (TrainingData::have_data() && klass->in_aot_cache()) {
    _training_replay_queue.push(klass, TrainingReplayQueue_lock, current);
  }
}

// For TrainingReplayQueue
template<>
void CompilationPolicyUtils::Queue<InstanceKlass>::print_on(outputStream* st) {
  int pos = 0;
  for (QueueNode* cur = _head; cur != nullptr; cur = cur->next()) {
    ResourceMark rm;
    InstanceKlass* ik = cur->value();
    st->print_cr("%3d: " INTPTR_FORMAT " %s", ++pos, p2i(ik), ik->external_name());
  }
}

void CompilationPolicy::replay_training_at_init_loop(JavaThread* current) {
  while (!CompileBroker::is_compilation_disabled_forever()) {
    InstanceKlass* ik = _training_replay_queue.pop(TrainingReplayQueue_lock, current);
    if (ik != nullptr) {
      replay_training_at_init_impl(ik, current);
    }
  }
}

static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_level) {
  if (comp_level == CompLevel_any) {
    if (CompilerConfig::is_c1_only()) {
      comp_level = CompLevel_simple;
    } else if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
      comp_level = CompLevel_full_optimization;
    }
  }
  return comp_level;
}

// Returns true if m is allowed to be compiled
bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
  // allow any levels for WhiteBox
  assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level %d", comp_level);

  if (m->is_abstract()) return false;
  if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;

  // Math intrinsics should never be compiled as this can lead to
  // monotonicity problems because the interpreter will prefer the
  // compiled code to the intrinsic version. This can't happen in
  // production because the invocation counter can't be incremented
  // but we shouldn't expose the system to this problem in testing
  // modes.
  if (!AbstractInterpreter::can_be_compiled(m)) {
    return false;
  }
  comp_level = adjust_level_for_compilability_query((CompLevel) comp_level);
  if (comp_level == CompLevel_any || is_compile(comp_level)) {
    return !m->is_not_compilable(comp_level);
  }
  return false;
}

// Returns true if m is allowed to be osr compiled
bool CompilationPolicy::can_be_osr_compiled(const methodHandle& m, int comp_level) {
  bool result = false;
  comp_level = adjust_level_for_compilability_query((CompLevel) comp_level);
  if (comp_level == CompLevel_any || is_compile(comp_level)) {
    result = !m->is_not_osr_compilable(comp_level);
  }
  return (result && can_be_compiled(m, comp_level));
}

bool CompilationPolicy::is_compilation_enabled() {
  // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
  return CompileBroker::should_compile_new_jobs();
}

CompileTask* CompilationPolicy::select_task_helper(CompileQueue* compile_queue) {
  // Remove unloaded methods from the queue
  for (CompileTask* task = compile_queue->first(); task != nullptr; ) {
    CompileTask* next = task->next();
    if (task->is_unloaded()) {
      compile_queue->remove_and_mark_stale(task);
    }
    task = next;
  }
#if INCLUDE_JVMCI
  if (UseJVMCICompiler && !BackgroundCompilation) {
    /*
     * In blocking compilation mode, the CompileBroker will make
     * compilations submitted by a JVMCI compiler thread non-blocking. These
     * compilations should be scheduled after all blocking compilations
     * to service non-compiler related compilations sooner and reduce the
     * chance of such compilations timing out.
     */
    for (CompileTask* task = compile_queue->first(); task != nullptr; task = task->next()) {
      if (task->is_blocking()) {
        return task;
      }
    }
  }
#endif
  return compile_queue->first();
}

// Simple methods run just as well compiled with C1 as with C2.
// Determine if a given method is such a case.
bool CompilationPolicy::is_trivial(const methodHandle& method) {
  if (method->is_accessor() ||
      method->is_constant_getter()) {
    return true;
  }
  return false;
}

bool CompilationPolicy::force_comp_at_level_simple(const methodHandle& method) {
  if (CompilationModeFlag::quick_internal()) {
#if INCLUDE_JVMCI
    if (UseJVMCICompiler) {
      AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
      if (comp != nullptr && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
        return true;
      }
    }
#endif
  }
  return false;
}

CompLevel CompilationPolicy::comp_level(Method* method) {
  nmethod *nm = method->code();
  if (nm != nullptr && nm->is_in_use()) {
    return (CompLevel)nm->comp_level();
  }
  return CompLevel_none;
}

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how
// many methods per compiler thread can be in the queue before
// the threshold values double.
class LoopPredicate : AllStatic {
public:
  static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
    double threshold_scaling;
    if (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, threshold_scaling)) {
      scale *= threshold_scaling;
    }
    switch(cur_level) {
    case CompLevel_none:
    case CompLevel_limited_profile:
      return b >= Tier3BackEdgeThreshold * scale;
    case CompLevel_full_profile:
      return b >= Tier4BackEdgeThreshold * scale;
    default:
      return true;
    }
  }

  static bool apply(const methodHandle& method, CompLevel cur_level, int i, int b) {
    double k = 1;
    switch(cur_level) {
    case CompLevel_none:
    // Fall through
    case CompLevel_limited_profile: {
      k = CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
      break;
    }
    case CompLevel_full_profile: {
      k = CompilationPolicy::threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
      break;
    }
    default:
      return true;
    }
    return apply_scaled(method, cur_level, i, b, k);
  }
};

class CallPredicate : AllStatic {
public:
  static bool apply_scaled(const methodHandle& method, CompLevel cur_level, int i, int b, double scale) {
    double threshold_scaling;
    if (CompilerOracle::has_option_value(method, CompileCommandEnum::CompileThresholdScaling, threshold_scaling)) {
      scale *= threshold_scaling;
    }
    switch(cur_level) {
    case CompLevel_none:
    case CompLevel_limited_profile:
      return (i >= Tier3InvocationThreshold * scale) ||
             (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale);
    case CompLevel_full_profile:
      return (i >= Tier4InvocationThreshold * scale) ||
             (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale);
    default:
      return true;
    }
  }

  static bool apply(const methodHandle& method, CompLevel cur_level, int i, int b) {
    double k = 1;
    switch(cur_level) {
    case CompLevel_none:
    case CompLevel_limited_profile: {
      k = CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
      break;
    }
    case CompLevel_full_profile: {
      k = CompilationPolicy::threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
      break;
    }
    default:
      return true;
    }
    return apply_scaled(method, cur_level, i, b, k);
  }
};

double CompilationPolicy::threshold_scale(CompLevel level, int feedback_k) {
  int comp_count = compiler_count(level);
  if (comp_count > 0 && feedback_k > 0) {
    double queue_size = CompileBroker::queue_size(level);
    double k = (double)queue_size / ((double)feedback_k * (double)comp_count) + 1;
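    // For example, with feedback_k == 5, two compiler threads and 20 queued
    // methods: k = 20 / (5 * 2) + 1 = 3, i.e. the tier thresholds triple.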

    // Increase C1 compile threshold when the code cache is filled more
    // than specified by IncreaseFirstTierCompileThresholdAt percentage.
    // The main intention is to keep enough free space for C2 compiled code
    // to achieve peak performance if the code cache is under stress.
    if (CompilerConfig::is_tiered() && !CompilationModeFlag::disable_intermediate() && is_c1_compile(level)) {
      double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
      if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
        k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
      }
    }
    return k;
  }
  return 1;
}

void CompilationPolicy::print_counters_on(outputStream* st, const char* prefix, Method* m) {
  int invocation_count = m->invocation_count();
  int backedge_count = m->backedge_count();
  MethodData* mdh = m->method_data();
  int mdo_invocations = 0, mdo_backedges = 0;
  int mdo_invocations_start = 0, mdo_backedges_start = 0;
  if (mdh != nullptr) {
    mdo_invocations = mdh->invocation_count();
    mdo_backedges = mdh->backedge_count();
    mdo_invocations_start = mdh->invocation_count_start();
    mdo_backedges_start = mdh->backedge_count_start();
  }
  st->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
            invocation_count, backedge_count, prefix,
            mdo_invocations, mdo_invocations_start,
            mdo_backedges, mdo_backedges_start);
  st->print(" %smax levels=%d,%d", prefix, m->highest_comp_level(), m->highest_osr_comp_level());
}

void CompilationPolicy::print_training_data_on(outputStream* st, const char* prefix, Method* method, CompLevel cur_level) {
  methodHandle m(Thread::current(), method);
  st->print(" %smtd: ", prefix);
  MethodTrainingData* mtd = MethodTrainingData::find(m);
  if (mtd == nullptr) {
    st->print("null");
  } else {
    if (should_delay_standard_transition(m, cur_level, mtd)) {
      st->print("delayed, ");
    }
    MethodData* md = mtd->final_profile();
    st->print("mdo=");
    if (md == nullptr) {
      st->print("null");
    } else {
      int mdo_invocations = md->invocation_count();
      int mdo_backedges = md->backedge_count();
      int mdo_invocations_start = md->invocation_count_start();
      int mdo_backedges_start = md->backedge_count_start();
      st->print("%d(%d), %d(%d)", mdo_invocations, mdo_invocations_start, mdo_backedges, mdo_backedges_start);
    }
    CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
    st->print(", deps=");
    if (ctd == nullptr) {
      st->print("null");
    } else {
      st->print("%d", ctd->init_deps_left_acquire());
    }
  }
}

// Print an event.
void CompilationPolicy::print_event_on(outputStream *st, EventType type, Method* m, Method* im, int bci, CompLevel level) {
  bool inlinee_event = m != im;

  st->print("%lf: [", os::elapsedTime());

  switch(type) {
  case CALL:
    st->print("call");
    break;
  case LOOP:
    st->print("loop");
    break;
  case COMPILE:
    st->print("compile");
    break;
  case FORCE_COMPILE:
    st->print("force-compile");
    break;
  case FORCE_RECOMPILE:
    st->print("force-recompile");
    break;
  case REMOVE_FROM_QUEUE:
    st->print("remove-from-queue");
    break;
  case UPDATE_IN_QUEUE:
    st->print("update-in-queue");
    break;
  case REPROFILE:
    st->print("reprofile");
    break;
  case MAKE_NOT_ENTRANT:
    st->print("make-not-entrant");
    break;
  default:
    st->print("unknown");
  }

  st->print(" level=%d ", level);

  ResourceMark rm;
  char *method_name = m->name_and_sig_as_C_string();
  st->print("[%s", method_name);
  if (inlinee_event) {
    char *inlinee_name = im->name_and_sig_as_C_string();
    st->print(" [%s]] ", inlinee_name);
  }
  else st->print("] ");
  st->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
            CompileBroker::queue_size(CompLevel_full_optimization));

  st->print(" rate=");
  if (m->prev_time() == 0) st->print("n/a");
  else st->print("%f", m->rate());

  RecompilationPolicy::print_load_average(st);

  st->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
            threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));

  if (type != COMPILE) {
    print_counters_on(st, "", m);
    if (inlinee_event) {
      print_counters_on(st, "inlinee ", im);
    }
    st->print(" compilable=");
    bool need_comma = false;
    if (!m->is_not_compilable(CompLevel_full_profile)) {
      st->print("c1");
      need_comma = true;
    }
    if (!m->is_not_osr_compilable(CompLevel_full_profile)) {
      if (need_comma) st->print(",");
      st->print("c1-osr");
      need_comma = true;
    }
    if (!m->is_not_compilable(CompLevel_full_optimization)) {
      if (need_comma) st->print(",");
      st->print("c2");
      need_comma = true;
    }
    if (!m->is_not_osr_compilable(CompLevel_full_optimization)) {
      if (need_comma) st->print(",");
      st->print("c2-osr");
    }
    st->print(" status=");
    if (m->queued_for_compilation()) {
      st->print("in-queue");
    } else st->print("idle");

    print_training_data_on(st, "", m, level);
    if (inlinee_event) {
      print_training_data_on(st, "inlinee ", im, level);
    }
  }
  st->print_cr("]");
}

void CompilationPolicy::print_event(EventType type, Method* m, Method* im, int bci, CompLevel level) {
  stringStream s;
  print_event_on(&s, type, m, im, bci, level);
  ResourceMark rm;
  tty->print("%s", s.as_string());
}

void CompilationPolicy::initialize() {
  if (!CompilerConfig::is_interpreter_only()) {
    int count = CICompilerCount;
    bool c1_only = CompilerConfig::is_c1_only();
    bool c2_only = CompilerConfig::is_c2_or_jvmci_compiler_only();
    int min_count = (c1_only || c2_only) ? 1 : 2;

#ifdef _LP64
    // Turn on ergonomic compiler count selection
    if (AOTCodeCache::maybe_dumping_code()) {
      // Assembly phase runs C1 and C2 compilation in separate phases,
      // and can use all the CPU threads it can reach. Adjust the common
      // options before policy starts overwriting them.
      FLAG_SET_ERGO_IF_DEFAULT(UseDynamicNumberOfCompilerThreads, false);
      FLAG_SET_ERGO_IF_DEFAULT(CICompilerCountPerCPU, false);
      if (FLAG_IS_DEFAULT(CICompilerCount)) {
        count = MAX2(count, os::active_processor_count());
      }
    }
    if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
      FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
    }
    if (CICompilerCountPerCPU) {
      // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
      int log_cpu = log2i(os::active_processor_count());
      int loglog_cpu = log2i(MAX2(log_cpu, 1));
      count = MAX2(log_cpu * loglog_cpu * 3 / 2, min_count);
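      // For example, with 16 active CPUs: log_cpu = 4, loglog_cpu = 2,
      // so count = 4 * 2 * 3 / 2 = 12 compiler threads.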
    }
    if (FLAG_IS_DEFAULT(CICompilerCount)) {
      // Make sure there is enough space in the code cache to hold all the compiler buffers
      size_t c1_size = 0;
#ifdef COMPILER1
      c1_size = Compiler::code_buffer_size();
#endif
      size_t c2_size = 0;
#ifdef COMPILER2
      c2_size = C2Compiler::initial_code_buffer_size();
#endif
      size_t buffer_size = 0;
      if (c1_only) {
        buffer_size = c1_size;
      } else if (c2_only) {
        buffer_size = c2_size;
      } else {
        buffer_size = c1_size / 3 + 2 * c2_size / 3;
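        // Weighted average matching the default thread split further down:
        // roughly 1/3 of the threads run C1 and 2/3 run C2.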
      }
      size_t max_count = (NonNMethodCodeHeapSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / buffer_size;
      if ((size_t)count > max_count) {
        // Lower the compiler count such that all buffers fit into the code cache
        count = MAX2((int)max_count, min_count);
      }
      FLAG_SET_ERGO(CICompilerCount, count);
    }
#else
    // On 32-bit systems, the number of compiler threads is limited to 3.
    // On these systems, the virtual address space available to the JVM
    // is usually limited to 2-4 GB (the exact value depends on the platform).
    // As the compilers (especially C2) can consume a large amount of
    // memory, scaling the number of compiler threads with the number of
    // available cores can result in the exhaustion of the address space
    // available to the VM and thus cause the VM to crash.
    if (FLAG_IS_DEFAULT(CICompilerCount)) {
      count = 3;
      FLAG_SET_ERGO(CICompilerCount, count);
    }
#endif // _LP64

    if (c1_only) {
      // No C2 compiler threads are needed
      set_c1_count(count);
    } else if (c2_only) {
      // No C1 compiler threads are needed
      set_c2_count(count);
    } else {
#if INCLUDE_JVMCI
      if (UseJVMCICompiler && UseJVMCINativeLibrary) {
        int libjvmci_count = MAX2((int) (count * JVMCINativeLibraryThreadFraction), 1);
        int c1_count = MAX2(count - libjvmci_count, 1);
        set_c2_count(libjvmci_count);
        set_c1_count(c1_count);
      } else
#endif
      {
        set_c1_count(MAX2(count / 3, 1));
        set_c2_count(MAX2(count - c1_count(), 1));
      }
    }
    if (AOTCodeCache::is_code_load_thread_on()) {
      set_ac_count((c1_only || c2_only) ? 1 : 2); // At minimum we need 2 threads to load C1 and C2 AOT code in parallel
    }
    assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
    set_increase_threshold_at_ratio();
  } else {
    // Interpreter mode creates no compilers
    FLAG_SET_ERGO(CICompilerCount, 0);
  }
  set_start_time(nanos_to_millis(os::javaTimeNanos()));
}


#ifdef ASSERT
bool CompilationPolicy::verify_level(CompLevel level) {
  if (TieredCompilation && level > TieredStopAtLevel) {
    return false;
  }
  // Check if there is a compiler to process the requested level
  if (!CompilerConfig::is_c1_enabled() && is_c1_compile(level)) {
    return false;
  }
  if (!CompilerConfig::is_c2_or_jvmci_compiler_enabled() && is_c2_compile(level)) {
    return false;
  }

  // Interpreter level is always valid.
  if (level == CompLevel_none) {
    return true;
  }
  if (CompilationModeFlag::normal()) {
    return true;
  } else if (CompilationModeFlag::quick_only()) {
    return level == CompLevel_simple;
  } else if (CompilationModeFlag::high_only()) {
    return level == CompLevel_full_optimization;
  } else if (CompilationModeFlag::high_only_quick_internal()) {
    return level == CompLevel_full_optimization || level == CompLevel_simple;
  }
  return false;
}
#endif


CompLevel CompilationPolicy::highest_compile_level() {
  CompLevel level = CompLevel_none;
  // Setup the maximum level available for the current compiler configuration.
  if (!CompilerConfig::is_interpreter_only()) {
    if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
      level = CompLevel_full_optimization;
    } else if (CompilerConfig::is_c1_enabled()) {
      if (CompilerConfig::is_c1_simple_only()) {
        level = CompLevel_simple;
      } else {
        level = CompLevel_full_profile;
      }
    }
  }
  // Clamp the maximum level with TieredStopAtLevel.
  if (TieredCompilation) {
    level = MIN2(level, (CompLevel) TieredStopAtLevel);
  }

  // Fix it up if after the clamping it has become invalid.
  // Bring it monotonically down depending on the next available level for
  // the compilation mode.
  if (!CompilationModeFlag::normal()) {
    // a) quick_only - levels 2,3,4 are invalid; levels -1,0,1 are valid;
    // b) high_only - levels 1,2,3 are invalid; levels -1,0,4 are valid;
    // c) high_only_quick_internal - levels 2,3 are invalid; levels -1,0,1,4 are valid.
    if (CompilationModeFlag::quick_only()) {
      if (level == CompLevel_limited_profile || level == CompLevel_full_profile || level == CompLevel_full_optimization) {
        level = CompLevel_simple;
      }
    } else if (CompilationModeFlag::high_only()) {
      if (level == CompLevel_simple || level == CompLevel_limited_profile || level == CompLevel_full_profile) {
        level = CompLevel_none;
      }
    } else if (CompilationModeFlag::high_only_quick_internal()) {
      if (level == CompLevel_limited_profile || level == CompLevel_full_profile) {
        level = CompLevel_simple;
      }
    }
  }

  assert(verify_level(level), "Invalid highest compilation level: %d", level);
  return level;
}

CompLevel CompilationPolicy::limit_level(CompLevel level) {
  level = MIN2(level, highest_compile_level());
  assert(verify_level(level), "Invalid compilation level: %d", level);
  return level;
}

CompLevel CompilationPolicy::initial_compile_level(const methodHandle& method) {
  CompLevel level = CompLevel_any;
  if (CompilationModeFlag::normal()) {
    level = CompLevel_full_profile;
  } else if (CompilationModeFlag::quick_only()) {
    level = CompLevel_simple;
  } else if (CompilationModeFlag::high_only()) {
    level = CompLevel_full_optimization;
  } else if (CompilationModeFlag::high_only_quick_internal()) {
    if (force_comp_at_level_simple(method)) {
      level = CompLevel_simple;
    } else {
      level = CompLevel_full_optimization;
    }
  }
  assert(level != CompLevel_any, "Unhandled compilation mode");
  return limit_level(level);
}

// Set carry flags on the counters if necessary
void CompilationPolicy::handle_counter_overflow(const methodHandle& method) {
  MethodCounters *mcs = method->method_counters();
  if (mcs != nullptr) {
    mcs->invocation_counter()->set_carry_on_overflow();
    mcs->backedge_counter()->set_carry_on_overflow();
  }
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    mdo->invocation_counter()->set_carry_on_overflow();
    mdo->backedge_counter()->set_carry_on_overflow();
  }
}

// Called with the queue locked and with at least one element
CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThread* THREAD) {
  CompileTask *max_blocking_task = nullptr;
  CompileTask *max_task = nullptr;
  Method* max_method = nullptr;

  int64_t t = nanos_to_millis(os::javaTimeNanos());
  // Iterate through the queue and find a method with a maximum rate.
  for (CompileTask* task = compile_queue->first(); task != nullptr;) {
    CompileTask* next_task = task->next();
    // If a method was unloaded or has been stale for some time, remove it from the queue.
    // Blocking tasks and tasks submitted from whitebox API don't become stale
    if (task->is_unloaded()) {
      compile_queue->remove_and_mark_stale(task);
      task = next_task;
      continue;
    }
    if (task->is_aot_load()) {
      // AOTCodeCache tasks are on a separate queue, and they should load fast. There is no need
      // to walk the rest of the queue, just take the task and go.
      return task;
    }
    if (task->is_blocking() && task->compile_reason() == CompileTask::Reason_Whitebox) {
      // CTW tasks, submitted as blocking Whitebox requests, do not participate in rate
      // selection and/or any level adjustments. Just return them in order.
      return task;
    }
    Method* method = task->method();
    methodHandle mh(THREAD, method);
    if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, mh) && !is_old(mh)) {
      if (PrintTieredEvents) {
        print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
      }
      method->clear_queued_for_compilation();
      method->set_pending_queue_processed(false);
      compile_queue->remove_and_mark_stale(task);
      task = next_task;
      continue;
    }
    update_rate(t, mh);
    if (max_task == nullptr || compare_methods(method, max_method) || compare_tasks(task, max_task)) {
      // Select a method with the highest rate
      max_task = task;
      max_method = method;
    }

    if (task->is_blocking()) {
      if (max_blocking_task == nullptr || compare_methods(method, max_blocking_task->method())) {
        max_blocking_task = task;
      }
    }

    task = next_task;
  }

  if (max_blocking_task != nullptr) {
    // In blocking compilation mode, the CompileBroker will make
    // compilations submitted by a JVMCI compiler thread non-blocking. These
    // compilations should be scheduled after all blocking compilations
    // to service non-compiler related compilations sooner and reduce the
    // chance of such compilations timing out.
    max_task = max_blocking_task;
    max_method = max_task->method();
  }

  methodHandle max_method_h(THREAD, max_method);

  if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
      max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
    max_task->set_comp_level(CompLevel_limited_profile);

    if (CompileBroker::compilation_is_complete(max_method_h(), max_task->osr_bci(), CompLevel_limited_profile,
                                               false /* requires_online_compilation */,
                                               CompileTask::Reason_None)) {
      if (PrintTieredEvents) {
        print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
      }
      compile_queue->remove_and_mark_stale(max_task);
      max_method->clear_queued_for_compilation();
      return nullptr;
    }

    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}

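// Reset the MDO start-counter snapshots for every method in the trap scope so
// that profiling effectively starts over after the deoptimization.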
void CompilationPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
    if (PrintTieredEvents) {
      print_event(REPROFILE, sd->method(), sd->method(), InvocationEntryBci, CompLevel_none);
    }
    MethodData* mdo = sd->method()->method_data();
    if (mdo != nullptr) {
      mdo->reset_start_counters();
    }
    if (sd->is_top()) break;
  }
}

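// Counter-overflow entry point into the policy: bci == InvocationEntryBci
// denotes an invocation event, any other bci a back-edge (loop) event.
// Returns an nmethod to continue execution in via OSR, or nullptr.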
nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle& inlinee,
                                  int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
  if (PrintTieredEvents) {
    print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
  }

  if (comp_level == CompLevel_none &&
      JvmtiExport::can_post_interpreter_events() &&
      THREAD->is_interp_only_mode()) {
    return nullptr;
  }
  if (ReplayCompiles) {
    // Don't trigger other compiles in testing mode
    return nullptr;
  }

  handle_counter_overflow(method);
  if (method() != inlinee()) {
    handle_counter_overflow(inlinee);
  }

  if (bci == InvocationEntryBci) {
    method_invocation_event(method, inlinee, comp_level, nm, THREAD);
  } else {
    // method == inlinee if the event originated in the main method
    method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD);
    // Check if event led to a higher level OSR compilation
    CompLevel expected_comp_level = MIN2(CompLevel_full_optimization, static_cast<CompLevel>(comp_level + 1));
    if (!CompilationModeFlag::disable_intermediate() && inlinee->is_not_osr_compilable(expected_comp_level)) {
      // It's not possible to reach the expected level so fall back to simple.
      expected_comp_level = CompLevel_simple;
    }
    CompLevel max_osr_level = static_cast<CompLevel>(inlinee->highest_osr_comp_level());
    if (max_osr_level >= expected_comp_level) { // fast check to avoid locking in a typical scenario
      nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, expected_comp_level, false);
      assert(osr_nm == nullptr || osr_nm->comp_level() >= expected_comp_level, "lookup_osr_nmethod_for is broken");
      if (osr_nm != nullptr && osr_nm->comp_level() != comp_level) {
        // Perform OSR with new nmethod
        return osr_nm;
      }
    }
  }
  return nullptr;
}

// Check if the method can be compiled, change level if necessary
void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level, TRAPS) {
  assert(verify_level(level), "Invalid compilation level requested: %d", level);

  if (level == CompLevel_none) {
    if (mh->has_compiled_code()) {
      // Happens when we switch to interpreter to profile.
      MutexLocker ml(Compile_lock);
      NoSafepointVerifier nsv;
      if (mh->has_compiled_code()) {
        mh->code()->make_not_used();
      }
      // Deoptimize immediately (we don't have to wait for a compile).
      JavaThread* jt = THREAD;
      RegisterMap map(jt,
                      RegisterMap::UpdateMap::skip,
                      RegisterMap::ProcessFrames::include,
                      RegisterMap::WalkContinuation::skip);
      frame fr = jt->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(jt, fr.id());
    }
    return;
  }

  // Check if the method can be compiled. Additional logic for TieredCompilation:
  // If it cannot be compiled with C1, continue profiling in the interpreter
  // and then compile with C2 (the transition function will request that,
  // see common()). If the method cannot be compiled with C2 but still can be
  // compiled with C1, compile it with pure C1.
  if ((bci == InvocationEntryBci && !can_be_compiled(mh, level))) {
    if (!CompilationModeFlag::disable_intermediate() &&
        level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
      compile(mh, bci, CompLevel_simple, THREAD);
    }
    return;
  }
  if ((bci != InvocationEntryBci && !can_be_osr_compiled(mh, level))) {
    if (!CompilationModeFlag::disable_intermediate() &&
        level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) {
      nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
      if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) {
        // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
        osr_nm->make_not_entrant(nmethod::InvalidationReason::OSR_INVALIDATION_FOR_COMPILING_WITH_C1);
      }
      compile(mh, bci, CompLevel_simple, THREAD);
    }
    return;
  }
  if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
    return;
  }
  if (!CompileBroker::compilation_is_in_queue(mh)) {
    if (PrintTieredEvents) {
      print_event(COMPILE, mh(), mh(), bci, level);
    }
    int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
    update_rate(nanos_to_millis(os::javaTimeNanos()), mh);
    bool requires_online_compilation = false;
    if (TrainingData::have_data()) {
      MethodTrainingData* mtd = MethodTrainingData::find_fast(mh);
      if (mtd != nullptr) {
        CompileTrainingData* ctd = mtd->last_toplevel_compile(level);
        if (ctd != nullptr) {
          requires_online_compilation = (ctd->init_deps_left_acquire() > 0);
        }
      }
    }
    CompileBroker::compile_method(mh, bci, level, hot_count, requires_online_compilation, CompileTask::Reason_Tiered, THREAD);
  }
}

// update_rate() is called from select_task() while holding a compile queue lock.
void CompilationPolicy::update_rate(int64_t t, const methodHandle& method) {
  // Skip update if counters are absent.
  // Can't allocate them since we are holding compile queue lock.
  if (method->method_counters() == nullptr) return;

  if (is_old(method)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    method->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since the last safepoint in milliseconds.
  int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
  int64_t delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = method->invocation_count() + method->backedge_count();
  int delta_e = event_count - method->prev_event_count();

  // We should be running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must've taken the previous point at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      method->set_prev_time(t);
      method->set_prev_event_count(event_count);
      method->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
    } else {
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for 25ms, zero the rate. Don't modify prev values.
        method->set_rate(0);
      }
    }
  }
}

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool CompilationPolicy::is_stale(int64_t t, int64_t timeout, const methodHandle& method) {
  int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
  int64_t delta_t = t - method->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = method->invocation_count() + method->backedge_count();
    int delta_e = event_count - method->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
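// A method is "old" once its raw counters exceed the tier thresholds scaled
// by TieredOldPercentage / 100, i.e. a multiple of the normal thresholds.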
bool CompilationPolicy::is_old(const methodHandle& method) {
  int i = method->invocation_count();
  int b = method->backedge_count();
  double k = TieredOldPercentage / 100.0;

  return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
}

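// Combined hotness weight used to order the compile queue: the measured rate
// (events per millisecond) scaled by the total counter activity. The +1 terms
// keep the product non-zero for methods with fresh counters.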
double CompilationPolicy::weight(Method* method) {
  return (double)(method->rate() + 1) * (method->invocation_count() + 1) * (method->backedge_count() + 1);
}

// Apply heuristics and return true if x should be compiled before y
bool CompilationPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else
  if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

bool CompilationPolicy::compare_tasks(CompileTask* x, CompileTask* y) {
  assert(!x->is_aot_load() && !y->is_aot_load(), "AOT code caching tasks are not expected here");
  if (x->compile_reason() != y->compile_reason() && y->compile_reason() == CompileTask::Reason_MustBeCompiled) {
    return true;
  }
  return false;
}

// Is method profiled enough?
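// That is, have the MDO counter deltas (accumulated since the last profile
// reset) crossed the Tier4 thresholds at scale 1?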
bool CompilationPolicy::is_method_profiled(const methodHandle& method) {
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return CallPredicate::apply_scaled(method, CompLevel_full_profile, i, b, 1);
  }
  return false;
}


// Determine if a method is mature.
bool CompilationPolicy::is_mature(MethodData* mdo) {
  if (Arguments::is_compiler_only()) {
    // Always report profiles as immature with -Xcomp
    return false;
  }
  if (mdo != nullptr) {
    methodHandle mh(Thread::current(), mdo->method());
    int i = mdo->invocation_count();
    int b = mdo->backedge_count();
    double k = ProfileMaturityPercentage / 100.0;
    return CallPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k) || LoopPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k);
  }
  return false;
}

// If a method is old enough and is still in the interpreter we would want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on compilers into account.
bool CompilationPolicy::should_create_mdo(const methodHandle& method, CompLevel cur_level) {
  if (cur_level != CompLevel_none || force_comp_at_level_simple(method) || CompilationModeFlag::quick_only() || !ProfileInterpreter) {
    return false;
  }

  if (TrainingData::have_data()) {
    MethodTrainingData* mtd = MethodTrainingData::find_fast(method);
    if (mtd != nullptr && mtd->saw_level(CompLevel_full_optimization)) {
      return true;
    }
  }

  if (is_old(method)) {
    return true;
  }
  int i = method->invocation_count();
  int b = method->backedge_count();
  double k = Tier0ProfilingStartPercentage / 100.0;

  // If the top level compiler is not keeping up, delay profiling.
  if (CompileBroker::queue_size(CompLevel_full_optimization) <= Tier0Delay * compiler_count(CompLevel_full_optimization)) {
    return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
  }
  return false;
}

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool CompilationPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
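// Also syncs the interpreter frame's mdp into the fresh MDO so that profiling
// can continue at the current bci rather than from the method entry.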
void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
  if (mh->is_native() ||
      mh->is_abstract() ||
      mh->is_accessor() ||
      mh->is_constant_getter()) {
    return;
  }
  if (mh->method_data() == nullptr) {
    Method::build_profiling_method_data(mh, CHECK_AND_CLEAR);
  }
  if (ProfileInterpreter && THREAD->has_last_Java_frame()) {
    MethodData* mdo = mh->method_data();
    if (mdo != nullptr) {
      frame last_frame = THREAD->last_frame();
      if (last_frame.is_interpreted_frame() && mh == last_frame.interpreter_frame_method()) {
        int bci = last_frame.interpreter_frame_bci();
        address dp = mdo->bci_to_dp(bci);
        last_frame.interpreter_frame_set_mdp(dp);
      }
    }
  }
}

CompLevel CompilationPolicy::trained_transition_from_none(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
  precond(mtd != nullptr);
  precond(cur_level == CompLevel_none);

  if (mtd->only_inlined() && !mtd->saw_level(CompLevel_full_optimization)) {
    return CompLevel_none;
  }

  bool training_has_profile = (mtd->final_profile() != nullptr);
  if (mtd->saw_level(CompLevel_full_optimization) && !training_has_profile) {
    return CompLevel_full_profile;
  }

  CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
  switch (highest_training_level) {
  case CompLevel_limited_profile:
  case CompLevel_full_profile:
    return CompLevel_limited_profile;
  case CompLevel_simple:
    return CompLevel_simple;
  case CompLevel_none:
    return CompLevel_none;
  default:
    break;
  }

  // Now handle the case of level 4.
  assert(highest_training_level == CompLevel_full_optimization, "Unexpected compilation level: %d", highest_training_level);
  if (!training_has_profile) {
    // The method was part of a level 4 compile but doesn't have a stored
    // profile, so we need to profile it.
    return CompLevel_full_profile;
  }
  const bool deopt = (static_cast<CompLevel>(method->highest_comp_level()) == CompLevel_full_optimization);
  // If we deopted, then we reprofile
  if (deopt && !is_method_profiled(method)) {
    return CompLevel_full_profile;
  }

  CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
  assert(ctd != nullptr, "Should have CTD for CompLevel_full_optimization");
  // With SkipTier2IfPossible and all deps satisfied, go to level 4 immediately
  if (SkipTier2IfPossible && ctd->init_deps_left_acquire() == 0) {
    if (method->method_data() == nullptr) {
      create_mdo(method, THREAD);
    }
    return CompLevel_full_optimization;
  }

  // Otherwise go to level 2
  return CompLevel_limited_profile;
}


CompLevel CompilationPolicy::trained_transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
  precond(mtd != nullptr);
  precond(cur_level == CompLevel_limited_profile);

  // One of the main reasons that we can get here is that we're waiting for the stored C2 code to become ready.

  // But first, check if we have a saved profile
  bool training_has_profile = (mtd->final_profile() != nullptr);
  if (!training_has_profile) {
    return CompLevel_full_profile;
  }

  assert(training_has_profile, "Have to have a profile to be here");
  // Check if the method is ready
  CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
  if (ctd != nullptr && ctd->init_deps_left_acquire() == 0) {
    if (method->method_data() == nullptr) {
      create_mdo(method, THREAD);
    }
    return CompLevel_full_optimization;
  }

  // Otherwise stay at the current level
  return CompLevel_limited_profile;
}


CompLevel CompilationPolicy::trained_transition_from_full_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
  precond(mtd != nullptr);
  precond(cur_level == CompLevel_full_profile);

  CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
  // We have the method at the full profile level, and we also know that it's possibly an important method.
  if (highest_training_level == CompLevel_full_optimization && !mtd->only_inlined()) {
    // Check if it is adequately profiled
    if (is_method_profiled(method)) {
      return CompLevel_full_optimization;
    }
  }

  // Otherwise stay at the current level
  return CompLevel_full_profile;
}

CompLevel CompilationPolicy::trained_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
  precond(MethodTrainingData::have_data());

  // If there is no training data recorded for this method, bail out.
  if (mtd == nullptr) {
    return cur_level;
  }

  CompLevel next_level = cur_level;
  switch(cur_level) {
  default: break;
  case CompLevel_none:
    next_level = trained_transition_from_none(method, cur_level, mtd, THREAD);
    break;
  case CompLevel_limited_profile:
    next_level = trained_transition_from_limited_profile(method, cur_level, mtd, THREAD);
    break;
  case CompLevel_full_profile:
    next_level = trained_transition_from_full_profile(method, cur_level, mtd, THREAD);
    break;
  }

  // We don't have any special strategies for the C2-only compilation modes, so just fix up the levels for now.
  if (CompilationModeFlag::high_only_quick_internal() && CompLevel_simple < next_level && next_level < CompLevel_full_optimization) {
    return CompLevel_none;
  }
  if (CompilationModeFlag::high_only() && next_level < CompLevel_full_optimization) {
    return CompLevel_none;
  }
  return (cur_level != next_level) ? limit_level(next_level) : cur_level;
}

/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 or Graal (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue we transition to
 *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    to enable the profiling to fully occur at level 0. In this case we change the compilation level
 *    of the method to 2 while the request is still in-queue, because it'll allow it to run much faster
 *    without full profiling while C2 is compiling.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    After a method was once compiled with C1 it can be identified as trivial and be compiled to
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
 *    the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization different loops
 * are possible.
 *
 */

// Common transition function. Given a predicate determines if a method should transition to another level.
template<typename Predicate>
CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD, bool disable_feedback) {
  CompLevel next_level = cur_level;

  if (force_comp_at_level_simple(method)) {
    next_level = CompLevel_simple;
  } else if (is_trivial(method) || method->is_native()) {
    // We do not care if there is profiling data for these methods, throw them to compiler.
    next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_full_optimization : CompLevel_simple;
  } else if (MethodTrainingData::have_data()) {
    MethodTrainingData* mtd = MethodTrainingData::find_fast(method);
    if (mtd == nullptr) {
      // We haven't seen compilations of this method in training. It's either very cold or the behavior changed.
      // Feed it to the standard TF with no profiling delay.
      next_level = standard_transition<Predicate>(method, cur_level, disable_feedback);
    } else {
      next_level = trained_transition(method, cur_level, mtd, THREAD);
      if (cur_level == next_level && !should_delay_standard_transition(method, cur_level, mtd)) {
        // trained_transition() is going to return the same level if no startup/warmup optimizations apply.
        // In order to catch possible pathologies due to behavior change we feed the event to the regular
        // TF but with profiling delay.
        next_level = standard_transition<Predicate>(method, cur_level, disable_feedback);
      }
    }
  } else {
    next_level = standard_transition<Predicate>(method, cur_level, disable_feedback);
  }
  return (next_level != cur_level) ? limit_level(next_level) : next_level;
}

bool CompilationPolicy::should_delay_standard_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd) {
  precond(mtd != nullptr);
  CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
  if (highest_training_level != CompLevel_full_optimization && cur_level == CompLevel_limited_profile) {
    // This is a lukewarm method - it hasn't been compiled with C2 during the training run and is currently
    // running at level 2. Delay any further state changes until its counters exceed the training run counts.
    MethodCounters* mc = method->method_counters();
1422 if (mc == nullptr) {
1423 return false;
1424 }
1425 if (mc->invocation_counter()->carry() || mc->backedge_counter()->carry()) {
1426 return false;
1427 }
1428 if (static_cast<int>(mc->invocation_counter()->count()) <= mtd->invocation_count() &&
1429 static_cast<int>(mc->backedge_counter()->count()) <= mtd->backedge_count()) {
1430 return true;
1431 }
1432 }
1433 return false;
1434 }
1435
1436 template<typename Predicate>
1437 CompLevel CompilationPolicy::standard_transition(const methodHandle& method, CompLevel cur_level, bool disable_feedback) {
1438 CompLevel next_level = cur_level;
1439 switch(cur_level) {
1440 default: break;
1441 case CompLevel_none:
1442 next_level = transition_from_none<Predicate>(method, cur_level, disable_feedback);
1443 break;
1444 case CompLevel_limited_profile:
1445 next_level = transition_from_limited_profile<Predicate>(method, cur_level, disable_feedback);
1446 break;
1447 case CompLevel_full_profile:
1448 next_level = transition_from_full_profile<Predicate>(method, cur_level);
1449 break;
1450 }
1451 return next_level;
1452 }
1453
1454 template<typename Predicate>
1455 CompLevel CompilationPolicy::transition_from_none(const methodHandle& method, CompLevel cur_level, bool disable_feedback) {
1456 precond(cur_level == CompLevel_none);
1457 CompLevel next_level = cur_level;
1458 int i = method->invocation_count();
1459 int b = method->backedge_count();
1460 // If we were at full profile level, would we switch to full opt?
1461 if (transition_from_full_profile<Predicate>(method, CompLevel_full_profile) == CompLevel_full_optimization) {
1462 next_level = CompLevel_full_optimization;
1463 } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply(method, cur_level, i, b)) {
1464 // C1-generated fully profiled code is about 30% slower than the limited profile
1465 // code that has only invocation and backedge counters. The observation is that
1466 // if the C2 queue is large enough we can spend too much time in the fully profiled code
1467 // while waiting for C2 to pick the method from the queue. To alleviate this problem
1468 // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
1469 // we choose to compile a limited-profile version first and then recompile with full profiling
1470 // when the load on C2 goes down.
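// For example, with Tier3DelayOn == 5 and two C2 compiler threads, freshly invoked methods
// are held at level 2 whenever the C2 queue holds more than 10 compilation requests.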
1471 if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
1472 next_level = CompLevel_limited_profile;
1473 } else {
1474 next_level = CompLevel_full_profile;
1475 }
1476 }
1477 return next_level;
1478 }
1479
1480 template<typename Predicate>
1481 CompLevel CompilationPolicy::transition_from_full_profile(const methodHandle& method, CompLevel cur_level) {
1482 precond(cur_level == CompLevel_full_profile);
1483 CompLevel next_level = cur_level;
1484 MethodData* mdo = method->method_data();
1485 if (mdo != nullptr) {
1486 if (mdo->would_profile() || CompilationModeFlag::disable_intermediate()) {
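// Use the counter deltas accumulated since the counters were last snapshotted rather than
// the lifetime totals, so the decision reflects the method's recent activity.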
1487 int mdo_i = mdo->invocation_count_delta();
1488 int mdo_b = mdo->backedge_count_delta();
1489 if (Predicate::apply(method, cur_level, mdo_i, mdo_b)) {
1490 next_level = CompLevel_full_optimization;
1491 }
1492 } else {
1493 next_level = CompLevel_full_optimization;
1494 }
1495 }
1496 return next_level;
1497 }
1498
1499 template<typename Predicate>
1500 CompLevel CompilationPolicy::transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, bool disable_feedback) {
1501 precond(cur_level == CompLevel_limited_profile);
1502 CompLevel next_level = cur_level;
1503 int i = method->invocation_count();
1504 int b = method->backedge_count();
1505 MethodData* mdo = method->method_data();
1506 if (mdo != nullptr) {
1507 if (mdo->would_profile()) {
1508 if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
1509 Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
1510 Predicate::apply(method, cur_level, i, b))) {
1511 next_level = CompLevel_full_profile;
1512 }
1513 } else {
1514 next_level = CompLevel_full_optimization;
1515 }
1516 } else {
1517 // If there is no MDO, we need to profile
1518 if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
1519 Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
1520 Predicate::apply(method, cur_level, i, b))) {
1521 next_level = CompLevel_full_profile;
1522 }
1523 }
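// If the method already has enough profile data to be considered fully profiled, there is
// no point in a level 3 compile; go straight to C2.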
1524 if (next_level == CompLevel_full_profile && is_method_profiled(method)) {
1525 next_level = CompLevel_full_optimization;
1526 }
1527 return next_level;
1528 }
1529
1530
1531 // Determine if a method should be compiled with a normal entry point at a different level.
1532 CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
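// Consider the existing OSR level only up to the level that the loop predicate would
// currently allow, so that a stale hot-loop compile cannot by itself raise the method level.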
1533 CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<LoopPredicate>(method, cur_level, THREAD, true));
1534 CompLevel next_level = common<CallPredicate>(method, cur_level, THREAD, !TrainingData::have_data() && is_old(method));
1535
1536 // If the OSR method level is greater than the regular method level, the levels should be
1537 // equalized by raising the regular method level in order to avoid OSRs during each
1538 // invocation of the method.
1539 if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
1540 MethodData* mdo = method->method_data();
1541 guarantee(mdo != nullptr, "MDO should not be nullptr");
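// The loop became hot enough for level 4 OSR code while the regular entry point is still
// at level 3. If the method is being invoked as well (not only looping), promote the
// regular version to full optimization too.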
1542 if (mdo->invocation_count() >= 1) {
1543 next_level = CompLevel_full_optimization;
1544 }
1545 } else {
1546 next_level = MAX2(osr_level, next_level);
1547 }
1548
1549 return next_level;
1550 }
1551
1552 // Determine if we should do an OSR compilation of a given method.
1553 CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
1554 CompLevel next_level = common<LoopPredicate>(method, cur_level, THREAD, true);
1555 if (cur_level == CompLevel_none) {
1556 // If there is a live OSR method, it means that we deopted to the interpreter
1557 // for the transition.
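// Pick the level of the existing OSR code, capped at the level the transition
// function has just chosen.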
1558 CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
1559 if (osr_level > CompLevel_none) {
1560 return osr_level;
1561 }
1562 }
1563 return next_level;
1564 }
1565
1566 // Handle the invocation event.
1567 void CompilationPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
1568 CompLevel level, nmethod* nm, TRAPS) {
1569 if (should_create_mdo(mh, level)) {
1570 create_mdo(mh, THREAD);
1571 }
1572 CompLevel next_level = call_event(mh, level, THREAD);
1573 if (next_level != level) {
1574 if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
1575 compile(mh, InvocationEntryBci, next_level, THREAD);
1576 }
1577 }
1578 }
1579
1580 // Handle the back branch event. Notice that we can compile the method
1581 // with a regular entry point from here.
1582 void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh,
1583 int bci, CompLevel level, nmethod* nm, TRAPS) {
1584 if (should_create_mdo(mh, level)) {
1585 create_mdo(mh, THREAD);
1586 }
1587 // Check if an MDO should be created for the inlined method
1588 if (should_create_mdo(imh, level)) {
1589 create_mdo(imh, THREAD);
1590 }
1591
1592 if (is_compilation_enabled()) {
1593 CompLevel next_osr_level = loop_event(imh, level, THREAD);
1594 CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
1595 // At the very least, compile the OSR version
1596 if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) {
1597 compile(imh, bci, next_osr_level, CHECK);
1598 }
1599
1600 // Use the loop event as an opportunity to also check whether there have been
1601 // enough calls.
1602 CompLevel cur_level, next_level;
1603 if (mh() != imh()) { // If there is an enclosing method
1604 {
1605 guarantee(nm != nullptr, "Should have nmethod here");
1606 cur_level = comp_level(mh());
1607 next_level = call_event(mh, cur_level, THREAD);
1608
1609 if (max_osr_level == CompLevel_full_optimization) {
1610 // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts
1611 bool make_not_entrant = false;
1612 if (nm->is_osr_method()) {
1613 // This is an OSR method; just make it not entrant and recompile it later if needed
1614 make_not_entrant = true;
1615 } else {
1616 if (next_level != CompLevel_full_optimization) {
1617 // next_level is not full opt, so we need to recompile the
1618 // enclosing method without the inlinee
1619 cur_level = CompLevel_none;
1620 make_not_entrant = true;
1621 }
1622 }
1623 if (make_not_entrant) {
1624 if (PrintTieredEvents) {
1625 int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
1626 print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
1627 }
1628 nm->make_not_entrant(nmethod::InvalidationReason::OSR_INVALIDATION_BACK_BRANCH);
1629 }
1630 }
1631 // Fix up next_level if necessary to avoid deopts
1632 if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
1633 next_level = CompLevel_full_profile;
1634 }
1635 if (cur_level != next_level) {
1636 if (!CompileBroker::compilation_is_in_queue(mh)) {
1637 compile(mh, InvocationEntryBci, next_level, THREAD);
1638 }
1639 }
1640 }
1641 } else {
1642 cur_level = comp_level(mh());
1643 next_level = call_event(mh, cur_level, THREAD);
1644 if (next_level != cur_level) {
1645 if (!CompileBroker::compilation_is_in_queue(mh)) {
1646 compile(mh, InvocationEntryBci, next_level, THREAD);
1647 }
1648 }
1649 }
1650 }
1651 }
1652