6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "code/scopeDesc.hpp"
27 #include "compiler/compilationPolicy.hpp"
28 #include "compiler/compileBroker.hpp"
29 #include "compiler/compilerDefinitions.inline.hpp"
30 #include "compiler/compilerOracle.hpp"
31 #include "memory/resourceArea.hpp"
32 #include "oops/methodData.hpp"
33 #include "oops/method.inline.hpp"
34 #include "oops/oop.inline.hpp"
35 #include "prims/jvmtiExport.hpp"
36 #include "runtime/arguments.hpp"
37 #include "runtime/deoptimization.hpp"
38 #include "runtime/frame.hpp"
39 #include "runtime/frame.inline.hpp"
40 #include "runtime/globals_extension.hpp"
41 #include "runtime/handles.inline.hpp"
42 #include "runtime/safepoint.hpp"
43 #include "runtime/safepointVerifiers.hpp"
44 #ifdef COMPILER1
45 #include "c1/c1_Compiler.hpp"
46 #endif
47 #ifdef COMPILER2
48 #include "opto/c2compiler.hpp"
49 #endif
50 #if INCLUDE_JVMCI
51 #include "jvmci/jvmci.hpp"
52 #endif
53
54 jlong CompilationPolicy::_start_time = 0;
55 int CompilationPolicy::_c1_count = 0;
56 int CompilationPolicy::_c2_count = 0;
57 double CompilationPolicy::_increase_threshold_at_ratio = 0;
58
59 void compilationPolicy_init() {
60 CompilationPolicy::initialize();
61 }
62
63 int CompilationPolicy::compiler_count(CompLevel comp_level) {
64 if (is_c1_compile(comp_level)) {
65 return c1_count();
66 } else if (is_c2_compile(comp_level)) {
67 return c2_count();
68 }
69 return 0;
70 }
71
72 // Returns true if m must be compiled before executing it
73 // This is intended to force compiles for methods (usually for
74 // debugging) that would otherwise be interpreted for some reason.
75 bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
76 // Don't allow Xcomp to cause compiles in replay mode
77 if (ReplayCompiles) return false;
78
79 if (m->has_compiled_code()) return false; // already compiled
80 if (!can_be_compiled(m, comp_level)) return false;
81
82 return !UseInterpreter || // must compile all methods
83 (AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
84 }
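// For example, running with -Xcomp turns UseInterpreter off, so every method that
// can_be_compiled() accepts must be compiled before its first execution; with the
// default +UseInterpreter, only loop methods are forced, and only while
// AlwaysCompileLoopMethods is set and the broker accepts new jobs.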
85
86 void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
87 if (must_be_compiled(m)) {
88 // This path is unusual, mostly used by the '-Xcomp' stress test mode.
89
90 if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
91 // don't force compilation, resolve was on behalf of compiler
92 return;
93 }
94 if (m->method_holder()->is_not_initialized()) {
95 // 'is_not_initialized' means not only '!is_initialized', but also that
96 // initialization has not been started yet ('!being_initialized')
97 // Do not force compilation of methods in uninitialized classes.
98       // Note that forcing compilation here would trip an assert later,
99 // in CompileBroker::compile_method.
100 // We sometimes use the link resolver to do reflective lookups
101 // even before classes are initialized.
102 return;
103 }
104 CompLevel level = initial_compile_level(m);
105 if (PrintTieredEvents) {
106 print_event(COMPILE, m(), m(), InvocationEntryBci, level);
107 }
108 CompileBroker::compile_method(m, InvocationEntryBci, level, methodHandle(), 0, CompileTask::Reason_MustBeCompiled, THREAD);
109 }
110 }
111
112 static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_level) {
113 if (comp_level == CompLevel_any) {
114 if (CompilerConfig::is_c1_only()) {
115 comp_level = CompLevel_simple;
116 } else if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
117 comp_level = CompLevel_full_optimization;
118 }
119 }
120 return comp_level;
121 }
122
123 // Returns true if m is allowed to be compiled
124 bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
125 // allow any levels for WhiteBox
126 assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level");
127
128 if (m->is_abstract()) return false;
129 if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
130
131 // Math intrinsics should never be compiled as this can lead to
132 // monotonicity problems because the interpreter will prefer the
133 // compiled code to the intrinsic version. This can't happen in
134 // production because the invocation counter can't be incremented
135 // but we shouldn't expose the system to this problem in testing
136 // modes.
137 if (!AbstractInterpreter::can_be_compiled(m)) {
138 return false;
139 }
140 comp_level = adjust_level_for_compilability_query((CompLevel) comp_level);
141 if (comp_level == CompLevel_any || is_compile(comp_level)) {
142 return !m->is_not_compilable(comp_level);
143 }
144 return false;
145 }
146
186 #endif
187 return compile_queue->first();
188 }
189
190 // Simple methods are just as good compiled with C1 as with C2.
191 // Determine if a given method is such a case.
192 bool CompilationPolicy::is_trivial(const methodHandle& method) {
193 if (method->is_accessor() ||
194 method->is_constant_getter()) {
195 return true;
196 }
197 return false;
198 }
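// Typical trivial methods are bytecode-level accessors and constant getters, e.g.
//   int getX()   { return x; }    // accessor
//   int answer() { return 42; }   // constant getter
// Profiling such methods at level 3 buys nothing, so C1 code is as good as C2 code.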
199
200 bool CompilationPolicy::force_comp_at_level_simple(const methodHandle& method) {
201 if (CompilationModeFlag::quick_internal()) {
202 #if INCLUDE_JVMCI
203 if (UseJVMCICompiler) {
204 AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
205 if (comp != nullptr && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
206 return true;
207 }
208 }
209 #endif
210 }
211 return false;
212 }
213
214 CompLevel CompilationPolicy::comp_level(Method* method) {
215 nmethod *nm = method->code();
216 if (nm != nullptr && nm->is_in_use()) {
217 return (CompLevel)nm->comp_level();
218 }
219 return CompLevel_none;
220 }
221
222 // Call and loop predicates determine whether a transition to a higher
223 // compilation level should be performed (pointers to predicate functions
224 // are passed to common()).
225 // Tier?LoadFeedback is basically a coefficient that determines
226 // how many methods per compiler thread can be in the queue before
306 int comp_count = compiler_count(level);
307 if (comp_count > 0) {
308 double queue_size = CompileBroker::queue_size(level);
309 double k = (double)queue_size / ((double)feedback_k * (double)comp_count) + 1;
310
311 // Increase C1 compile threshold when the code cache is filled more
312 // than specified by IncreaseFirstTierCompileThresholdAt percentage.
313 // The main intention is to keep enough free space for C2 compiled code
314 // to achieve peak performance if the code cache is under stress.
315 if (CompilerConfig::is_tiered() && !CompilationModeFlag::disable_intermediate() && is_c1_compile(level)) {
316 double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
317 if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
318 k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
319 }
320 }
321 return k;
322 }
323 return 1;
324 }
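// Illustrative arithmetic (hypothetical values, not defaults): with 24 queued tasks,
// feedback_k = 3 and 2 compiler threads, k = 24 / (3 * 2) + 1 = 5, i.e. the tier
// thresholds are effectively scaled 5x while the queue stays that long. If the code
// cache term also applies, with reverse_free_ratio exceeding the trigger ratio by 1,
// k is further multiplied by e^1 ~= 2.72.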
325
326 void CompilationPolicy::print_counters(const char* prefix, const Method* m) {
327 int invocation_count = m->invocation_count();
328 int backedge_count = m->backedge_count();
329 MethodData* mdh = m->method_data();
330 int mdo_invocations = 0, mdo_backedges = 0;
331 int mdo_invocations_start = 0, mdo_backedges_start = 0;
332 if (mdh != nullptr) {
333 mdo_invocations = mdh->invocation_count();
334 mdo_backedges = mdh->backedge_count();
335 mdo_invocations_start = mdh->invocation_count_start();
336 mdo_backedges_start = mdh->backedge_count_start();
337 }
338 tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
339 invocation_count, backedge_count, prefix,
340 mdo_invocations, mdo_invocations_start,
341 mdo_backedges, mdo_backedges_start);
342 tty->print(" %smax levels=%d,%d", prefix,
343 m->highest_comp_level(), m->highest_osr_comp_level());
344 }
345
346 // Print an event.
347 void CompilationPolicy::print_event(EventType type, const Method* m, const Method* im, int bci, CompLevel level) {
348 bool inlinee_event = m != im;
349
350 ttyLocker tty_lock;
351 tty->print("%lf: [", os::elapsedTime());
352
353 switch(type) {
354 case CALL:
355 tty->print("call");
356 break;
357 case LOOP:
358 tty->print("loop");
359 break;
360 case COMPILE:
361 tty->print("compile");
362 break;
363 case REMOVE_FROM_QUEUE:
364 tty->print("remove-from-queue");
365 break;
366 case UPDATE_IN_QUEUE:
367 tty->print("update-in-queue");
368 break;
369 case REPROFILE:
370 tty->print("reprofile");
371 break;
372 case MAKE_NOT_ENTRANT:
373 tty->print("make-not-entrant");
374 break;
375 default:
376 tty->print("unknown");
377 }
378
379 tty->print(" level=%d ", level);
380
381 ResourceMark rm;
382 char *method_name = m->name_and_sig_as_C_string();
383 tty->print("[%s", method_name);
384 if (inlinee_event) {
385 char *inlinee_name = im->name_and_sig_as_C_string();
386 tty->print(" [%s]] ", inlinee_name);
387 }
388 else tty->print("] ");
389 tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
390 CompileBroker::queue_size(CompLevel_full_optimization));
391
392 tty->print(" rate=");
393 if (m->prev_time() == 0) tty->print("n/a");
394 else tty->print("%f", m->rate());
395
396 tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
397 threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
398
399 if (type != COMPILE) {
400 print_counters("", m);
401 if (inlinee_event) {
402 print_counters("inlinee ", im);
403 }
404 tty->print(" compilable=");
405 bool need_comma = false;
406 if (!m->is_not_compilable(CompLevel_full_profile)) {
407 tty->print("c1");
408 need_comma = true;
409 }
410 if (!m->is_not_osr_compilable(CompLevel_full_profile)) {
411 if (need_comma) tty->print(",");
412 tty->print("c1-osr");
413 need_comma = true;
414 }
415 if (!m->is_not_compilable(CompLevel_full_optimization)) {
416 if (need_comma) tty->print(",");
417 tty->print("c2");
418 need_comma = true;
419 }
420 if (!m->is_not_osr_compilable(CompLevel_full_optimization)) {
421 if (need_comma) tty->print(",");
422 tty->print("c2-osr");
423 }
424 tty->print(" status=");
425 if (m->queued_for_compilation()) {
426 tty->print("in-queue");
427 } else tty->print("idle");
428 }
429 tty->print_cr("]");
430 }
431
432 void CompilationPolicy::initialize() {
433 if (!CompilerConfig::is_interpreter_only()) {
434 int count = CICompilerCount;
435 bool c1_only = CompilerConfig::is_c1_only();
436 bool c2_only = CompilerConfig::is_c2_or_jvmci_compiler_only();
437
438 #ifdef _LP64
439 // Turn on ergonomic compiler count selection
440 if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
441 FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
442 }
443 if (CICompilerCountPerCPU) {
444 // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
445 int log_cpu = log2i(os::active_processor_count());
446 int loglog_cpu = log2i(MAX2(log_cpu, 1));
447 count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
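// Illustrative values: on a 64-way machine, log_cpu = log2i(64) = 6 and
// loglog_cpu = log2i(6) = 2, giving count = MAX2(6 * 2 * 3 / 2, 2) = 18
// compiler threads before the C1/C2 split below.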
471 // available cores can result in the exhaustion of the address space
472 // available to the VM and thus cause the VM to crash.
473 if (FLAG_IS_DEFAULT(CICompilerCount)) {
474 count = 3;
475 FLAG_SET_ERGO(CICompilerCount, count);
476 }
477 #endif
478
479 if (c1_only) {
480 // No C2 compiler thread required
481 set_c1_count(count);
482 } else if (c2_only) {
483 set_c2_count(count);
484 } else {
485 #if INCLUDE_JVMCI
486 if (UseJVMCICompiler && UseJVMCINativeLibrary) {
487 int libjvmci_count = MAX2((int) (count * JVMCINativeLibraryThreadFraction), 1);
488 int c1_count = MAX2(count - libjvmci_count, 1);
489 set_c2_count(libjvmci_count);
490 set_c1_count(c1_count);
491 } else
492 #endif
493 {
494 set_c1_count(MAX2(count / 3, 1));
495 set_c2_count(MAX2(count - c1_count(), 1));
496 }
497 }
498 assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
499 set_increase_threshold_at_ratio();
500 }
501 set_start_time(nanos_to_millis(os::javaTimeNanos()));
502 }
503
504
505 #ifdef ASSERT
506 bool CompilationPolicy::verify_level(CompLevel level) {
507 if (TieredCompilation && level > TieredStopAtLevel) {
508 return false;
509 }
510 // Check if there is a compiler to process the requested level
511 if (!CompilerConfig::is_c1_enabled() && is_c1_compile(level)) {
512 return false;
513 }
514 if (!CompilerConfig::is_c2_or_jvmci_compiler_enabled() && is_c2_compile(level)) {
515 return false;
516 }
517
518 // Interpreter level is always valid.
519 if (level == CompLevel_none) {
520 return true;
521 }
522 if (CompilationModeFlag::normal()) {
523 return true;
524 } else if (CompilationModeFlag::quick_only()) {
601 }
602 assert(level != CompLevel_any, "Unhandled compilation mode");
603 return limit_level(level);
604 }
605
606 // Set carry flags on the counters if necessary
607 void CompilationPolicy::handle_counter_overflow(const methodHandle& method) {
608 MethodCounters *mcs = method->method_counters();
609 if (mcs != nullptr) {
610 mcs->invocation_counter()->set_carry_on_overflow();
611 mcs->backedge_counter()->set_carry_on_overflow();
612 }
613 MethodData* mdo = method->method_data();
614 if (mdo != nullptr) {
615 mdo->invocation_counter()->set_carry_on_overflow();
616 mdo->backedge_counter()->set_carry_on_overflow();
617 }
618 }
619
620 // Called with the queue locked and with at least one element
621 CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue) {
622 CompileTask *max_blocking_task = nullptr;
623 CompileTask *max_task = nullptr;
624 Method* max_method = nullptr;
625
626 jlong t = nanos_to_millis(os::javaTimeNanos());
627 // Iterate through the queue and find a method with a maximum rate.
628 for (CompileTask* task = compile_queue->first(); task != nullptr;) {
629 CompileTask* next_task = task->next();
630 // If a method was unloaded or has been stale for some time, remove it from the queue.
631     // Blocking tasks and tasks submitted from the whitebox API don't become stale
632 if (task->is_unloaded()) {
633 compile_queue->remove_and_mark_stale(task);
634 task = next_task;
635 continue;
636 }
637 Method* method = task->method();
638 methodHandle mh(Thread::current(), method);
639 if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, mh) && !is_old(mh)) {
640 if (PrintTieredEvents) {
641 print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
642 }
643 method->clear_queued_for_compilation();
644 compile_queue->remove_and_mark_stale(task);
645 task = next_task;
646 continue;
647 }
648 update_rate(t, mh);
649 if (max_task == nullptr || compare_methods(method, max_method)) {
650 // Select a method with the highest rate
651 max_task = task;
652 max_method = method;
653 }
654
655 if (task->is_blocking()) {
656 if (max_blocking_task == nullptr || compare_methods(method, max_blocking_task->method())) {
657 max_blocking_task = task;
658 }
659 }
660
661 task = next_task;
662 }
663
664 if (max_blocking_task != nullptr) {
665 // In blocking compilation mode, the CompileBroker will make
666 // compilations submitted by a JVMCI compiler thread non-blocking. These
667 // compilations should be scheduled after all blocking compilations
668 // to service non-compiler related compilations sooner and reduce the
669 // chance of such compilations timing out.
670 max_task = max_blocking_task;
671 max_method = max_task->method();
672 }
673
674 methodHandle max_method_h(Thread::current(), max_method);
675
676 if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
677 max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
678 max_task->set_comp_level(CompLevel_limited_profile);
679
680 if (CompileBroker::compilation_is_complete(max_method_h, max_task->osr_bci(), CompLevel_limited_profile)) {
681 if (PrintTieredEvents) {
682 print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
683 }
684 compile_queue->remove_and_mark_stale(max_task);
685 max_method->clear_queued_for_compilation();
686 return nullptr;
687 }
688
689 if (PrintTieredEvents) {
690 print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
691 }
692 }
693
694 return max_task;
695 }
696
697 void CompilationPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
698 for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
699 if (PrintTieredEvents) {
700 print_event(REPROFILE, sd->method(), sd->method(), InvocationEntryBci, CompLevel_none);
701 }
702 MethodData* mdo = sd->method()->method_data();
703 if (mdo != nullptr) {
704 mdo->reset_start_counters();
705 }
706 if (sd->is_top()) break;
707 }
708 }
709
710 nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle& inlinee,
711 int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
712 if (PrintTieredEvents) {
713 print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
714 }
715
716 if (comp_level == CompLevel_none &&
717 JvmtiExport::can_post_interpreter_events() &&
718 THREAD->is_interp_only_mode()) {
719 return nullptr;
720 }
721 if (ReplayCompiles) {
722 // Don't trigger other compiles in testing mode
723 return nullptr;
724 }
725
726 handle_counter_overflow(method);
727 if (method() != inlinee()) {
728 handle_counter_overflow(inlinee);
729 }
730
731 if (bci == InvocationEntryBci) {
732 method_invocation_event(method, inlinee, comp_level, nm, THREAD);
733 } else {
734 // method == inlinee if the event originated in the main method
735 method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD);
791 if (level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) {
792 nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
793 if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) {
794 // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
795 osr_nm->make_not_entrant();
796 }
797 compile(mh, bci, CompLevel_simple, THREAD);
798 }
799 return;
800 }
801 }
802 if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
803 return;
804 }
805 if (!CompileBroker::compilation_is_in_queue(mh)) {
806 if (PrintTieredEvents) {
807 print_event(COMPILE, mh(), mh(), bci, level);
808 }
809 int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
810 update_rate(nanos_to_millis(os::javaTimeNanos()), mh);
811 CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, THREAD);
812 }
813 }
814
815 // update_rate() is called from select_task() while holding a compile queue lock.
816 void CompilationPolicy::update_rate(jlong t, const methodHandle& method) {
817 // Skip update if counters are absent.
818 // Can't allocate them since we are holding compile queue lock.
819 if (method->method_counters() == nullptr) return;
820
821 if (is_old(method)) {
822 // We don't remove old methods from the queue,
823 // so we can just zero the rate.
824 method->set_rate(0);
825 return;
826 }
827
828   // We don't update the rate if we've just come out of a safepoint.
829 // delta_s is the time since last safepoint in milliseconds.
830 jlong delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
831 jlong delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement
832 // How many events were there since the last time?
833 int event_count = method->invocation_count() + method->backedge_count();
834 int delta_e = event_count - method->prev_event_count();
835
836 // We should be running for at least 1ms.
837 if (delta_s >= TieredRateUpdateMinTime) {
838 // And we must've taken the previous point at least 1ms before.
839 if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
840 method->set_prev_time(t);
841 method->set_prev_event_count(event_count);
842 method->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
843 } else {
844 if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
845 // If nothing happened for 25ms, zero the rate. Don't modify prev values.
846 method->set_rate(0);
847 }
848 }
849 }
850 }
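// Worked example (hypothetical counts): if 3000 invocation+backedge events arrived
// since the previous sample taken 100ms earlier, and we are comfortably past the
// last safepoint, the method's rate becomes 3000 / 100 = 30 events per millisecond.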
851
852 // Check if this method has been stale for a given number of milliseconds.
853 // See select_task().
854 bool CompilationPolicy::is_stale(jlong t, jlong timeout, const methodHandle& method) {
855 jlong delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
856 jlong delta_t = t - method->prev_time();
857 if (delta_t > timeout && delta_s > timeout) {
858 int event_count = method->invocation_count() + method->backedge_count();
859 int delta_e = event_count - method->prev_event_count();
860 // Return true if there were no events.
861 return delta_e == 0;
862 }
863 return false;
864 }
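// Example: with a 50ms timeout (a hypothetical value), a method whose counters have
// not moved during the last 50ms, with no safepoint inside that window distorting
// the clock, is reported stale and becomes eligible for removal in select_task().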
865
866 // We don't remove old methods from the compile queue even if they have
867 // very low activity. See select_task().
868 bool CompilationPolicy::is_old(const methodHandle& method) {
869 int i = method->invocation_count();
870 int b = method->backedge_count();
871 double k = TieredOldPercentage / 100.0;
872
873 return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
874 }
875
876 double CompilationPolicy::weight(Method* method) {
877 return (double)(method->rate() + 1) * (method->invocation_count() + 1) * (method->backedge_count() + 1);
878 }
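// Illustrative: a method with rate = 2 events/ms, 5000 invocations and 100 backedges
// weighs (2 + 1) * (5000 + 1) * (100 + 1) ~= 1.5e6. The +1 terms keep brand-new
// methods (all counters zero) at a small but non-zero weight of 1.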
879
880 // Apply heuristics and return true if x should be compiled before y
881 bool CompilationPolicy::compare_methods(Method* x, Method* y) {
882 if (x->highest_comp_level() > y->highest_comp_level()) {
883 // recompilation after deopt
884 return true;
885 } else
886 if (x->highest_comp_level() == y->highest_comp_level()) {
887 if (weight(x) > weight(y)) {
888 return true;
889 }
890 }
891 return false;
892 }
893
894 // Is method profiled enough?
895 bool CompilationPolicy::is_method_profiled(const methodHandle& method) {
896 MethodData* mdo = method->method_data();
897 if (mdo != nullptr) {
898 int i = mdo->invocation_count_delta();
899 int b = mdo->backedge_count_delta();
900 return CallPredicate::apply_scaled(method, CompLevel_full_profile, i, b, 1);
901 }
902 return false;
903 }
904
905
906 // Determine if a method is mature.
907 bool CompilationPolicy::is_mature(Method* method) {
908 if (Arguments::is_compiler_only()) {
909 // Always report profiles as immature with -Xcomp
910 return false;
911 }
912 methodHandle mh(Thread::current(), method);
913 MethodData* mdo = method->method_data();
914 if (mdo != nullptr) {
915 int i = mdo->invocation_count();
916 int b = mdo->backedge_count();
917 double k = ProfileMaturityPercentage / 100.0;
918 return CallPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k) || LoopPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k);
919 }
920 return false;
921 }
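// Example (assuming the default ProfileMaturityPercentage of 20): the profile is
// considered mature once the MDO counters reach 20% of the Tier3 call or loop
// thresholds, i.e. k = 0.2 in the scaled predicates above.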
922
923 // If a method is old enough and is still in the interpreter we would want to
924 // start profiling without waiting for the compiled method to arrive.
925 // We also take the load on the compilers into account.
926 bool CompilationPolicy::should_create_mdo(const methodHandle& method, CompLevel cur_level) {
927 if (cur_level != CompLevel_none || force_comp_at_level_simple(method) || CompilationModeFlag::quick_only() || !ProfileInterpreter) {
928 return false;
929 }
930 if (is_old(method)) {
931 return true;
932 }
933 int i = method->invocation_count();
934 int b = method->backedge_count();
935 double k = Tier0ProfilingStartPercentage / 100.0;
936
937 // If the top level compiler is not keeping up, delay profiling.
938 if (CompileBroker::queue_size(CompLevel_full_optimization) <= Tier0Delay * compiler_count(CompLevel_full_optimization)) {
939 return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
940 }
941 return false;
942 }
943
944 // Inlining control: if we're compiling a profiled method with C1 and the callee
945 // is known to have OSRed in a C2 version, don't inline it.
946 bool CompilationPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
947 CompLevel comp_level = (CompLevel)env->comp_level();
948 if (comp_level == CompLevel_full_profile ||
949 comp_level == CompLevel_limited_profile) {
950 return callee->highest_osr_comp_level() == CompLevel_full_optimization;
951 }
952 return false;
953 }
954
955 // Create MDO if necessary.
956 void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
957 if (mh->is_native() ||
958 mh->is_abstract() ||
959 mh->is_accessor() ||
960 mh->is_constant_getter()) {
961 return;
962 }
963 if (mh->method_data() == nullptr) {
964 Method::build_profiling_method_data(mh, CHECK_AND_CLEAR);
965 }
966 if (ProfileInterpreter) {
967 MethodData* mdo = mh->method_data();
968 if (mdo != nullptr) {
969 frame last_frame = THREAD->last_frame();
970 if (last_frame.is_interpreted_frame() && mh == last_frame.interpreter_frame_method()) {
971 int bci = last_frame.interpreter_frame_bci();
972 address dp = mdo->bci_to_dp(bci);
973 last_frame.interpreter_frame_set_mdp(dp);
974 }
975 }
976 }
977 }
978
979
980
981 /*
982 * Method states:
983 * 0 - interpreter (CompLevel_none)
984 * 1 - pure C1 (CompLevel_simple)
985 * 2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
986 * 3 - C1 with full profiling (CompLevel_full_profile)
987 * 4 - C2 or Graal (CompLevel_full_optimization)
988 *
989 * Common state transition patterns:
990 * a. 0 -> 3 -> 4.
991 * The most common path. But note that even in this straightforward case
992 * profiling can start at level 0 and finish at level 3.
993 *
994 * b. 0 -> 2 -> 3 -> 4.
995 * This case occurs when the load on C2 is deemed too high. So, instead of transitioning
996 * into state 3 directly and over-profiling while a method is in the C2 queue we transition to
997 * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
998 *
999 * c. 0 -> (3->2) -> 4.
1000 * In this case we enqueue a method for level 3 compilation, but the C1 queue is long enough
1001 * to enable the profiling to fully occur at level 0. In this case we change the compilation level
1002 * of the method to 2 while the request is still in-queue, because it'll allow it to run much faster
1003 * without full profiling while c2 is compiling.
1004 *
1005 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
1006 * After a method was once compiled with C1 it can be identified as trivial and be compiled to
1007 * level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
1008 *
1009 * e. 0 -> 4.
1010 * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
1011 * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
1012 * the compiled version already exists).
1013 *
1014 * Note that since state 0 can be reached from any other state via deoptimization, different loops
1015 * are possible.
1016 *
1017 */
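// For instance, under sustained C2 pressure a hot method typically takes path (b):
// it is first compiled at level 2 (invocation/backedge counters only), promoted to
// level 3 once the C2 queue drains, and finally reaches level 4 when its MDO
// counters pass the Tier4 thresholds.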
1018
1019 // Common transition function. Given a predicate, it determines whether a method should transition to another level.
1020 template<typename Predicate>
1021 CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, bool disable_feedback) {
1022 CompLevel next_level = cur_level;
1023 int i = method->invocation_count();
1024 int b = method->backedge_count();
1025
1026 if (force_comp_at_level_simple(method)) {
1027 next_level = CompLevel_simple;
1028 } else {
1029 if (is_trivial(method) || method->is_native()) {
1030 next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_full_optimization : CompLevel_simple;
1031 } else {
1032 switch(cur_level) {
1033 default: break;
1034 case CompLevel_none:
1035 // If we were at full profile level, would we switch to full opt?
1036 if (common<Predicate>(method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
1037 next_level = CompLevel_full_optimization;
1038 } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply(method, cur_level, i, b)) {
1039 // C1-generated fully profiled code is about 30% slower than the limited profile
1040 // code that has only invocation and backedge counters. The observation is that
1041 // if C2 queue is large enough we can spend too much time in the fully profiled code
1042 // while waiting for C2 to pick the method from the queue. To alleviate this problem
1043 // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
1044 // we choose to compile a limited profiled version and then recompile with full profiling
1045 // when the load on C2 goes down.
1046 if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
1047 Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
1048 next_level = CompLevel_limited_profile;
1049 } else {
1050 next_level = CompLevel_full_profile;
1051 }
1052 }
1053 break;
1054 case CompLevel_limited_profile:
1055 if (is_method_profiled(method)) {
1056 // Special case: we got here because this method was fully profiled in the interpreter.
1057 next_level = CompLevel_full_optimization;
1058 } else {
1059 MethodData* mdo = method->method_data();
1060 if (mdo != nullptr) {
1061 if (mdo->would_profile()) {
1062 if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
1063 Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
1064 Predicate::apply(method, cur_level, i, b))) {
1065 next_level = CompLevel_full_profile;
1066 }
1067 } else {
1068 next_level = CompLevel_full_optimization;
1069 }
1070 } else {
1071 // If there is no MDO we need to profile
1072 if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
1073 Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
1074 Predicate::apply(method, cur_level, i, b))) {
1075 next_level = CompLevel_full_profile;
1076 }
1077 }
1078 }
1079 break;
1080 case CompLevel_full_profile:
1081 {
1082 MethodData* mdo = method->method_data();
1083 if (mdo != nullptr) {
1084 if (mdo->would_profile() || CompilationModeFlag::disable_intermediate()) {
1085 int mdo_i = mdo->invocation_count_delta();
1086 int mdo_b = mdo->backedge_count_delta();
1087 if (Predicate::apply(method, cur_level, mdo_i, mdo_b)) {
1088 next_level = CompLevel_full_optimization;
1089 }
1090 } else {
1091 next_level = CompLevel_full_optimization;
1092 }
1093 }
1094 }
1095 break;
1096 }
1097 }
1098 }
1099 return (next_level != cur_level) ? limit_level(next_level) : next_level;
1100 }
1101
1102
1103
1104 // Determine if a method should be compiled with a normal entry point at a different level.
1105 CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, Thread* thread) {
1106 CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<LoopPredicate>(method, cur_level, true));
1107 CompLevel next_level = common<CallPredicate>(method, cur_level, is_old(method));
1108
1109 // If OSR method level is greater than the regular method level, the levels should be
1110 // equalized by raising the regular method level in order to avoid OSRs during each
1111 // invocation of the method.
1112 if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
1113 MethodData* mdo = method->method_data();
1114 guarantee(mdo != nullptr, "MDO should not be nullptr");
1115 if (mdo->invocation_count() >= 1) {
1116 next_level = CompLevel_full_optimization;
1117 }
1118 } else {
1119 next_level = MAX2(osr_level, next_level);
1120 }
1121 return next_level;
1122 }
1123
1124 // Determine if we should do an OSR compilation of a given method.
1125 CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, Thread* thread) {
1126 CompLevel next_level = common<LoopPredicate>(method, cur_level, true);
1127 if (cur_level == CompLevel_none) {
1128 // If there is a live OSR method that means that we deopted to the interpreter
1129 // for the transition.
1130 CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
1131 if (osr_level > CompLevel_none) {
1132 return osr_level;
1133 }
1134 }
1135 return next_level;
1136 }
1137
1138 // Handle the invocation event.
1139 void CompilationPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
1140 CompLevel level, nmethod* nm, TRAPS) {
1141 if (should_create_mdo(mh, level)) {
1142 create_mdo(mh, THREAD);
1143 }
1144 CompLevel next_level = call_event(mh, level, THREAD);
1145 if (next_level != level) {
1146 if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/aotLinkedClassBulkLoader.hpp"
27 #include "code/scopeDesc.hpp"
28 #include "code/SCCache.hpp"
29 #include "compiler/compilationPolicy.hpp"
30 #include "compiler/compileBroker.hpp"
31 #include "compiler/compilerDefinitions.inline.hpp"
32 #include "compiler/compilerOracle.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "oops/methodData.hpp"
35 #include "oops/method.inline.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "oops/recompilationSchedule.hpp"
38 #include "oops/trainingData.hpp"
39 #include "prims/jvmtiExport.hpp"
40 #include "runtime/arguments.hpp"
41 #include "runtime/deoptimization.hpp"
42 #include "runtime/frame.hpp"
43 #include "runtime/frame.inline.hpp"
44 #include "runtime/globals_extension.hpp"
45 #include "runtime/handles.inline.hpp"
46 #include "runtime/safepoint.hpp"
47 #include "runtime/safepointVerifiers.hpp"
48 #ifdef COMPILER1
49 #include "c1/c1_Compiler.hpp"
50 #endif
51 #ifdef COMPILER2
52 #include "opto/c2compiler.hpp"
53 #endif
54 #if INCLUDE_JVMCI
55 #include "jvmci/jvmci.hpp"
56 #endif
57
58 int64_t CompilationPolicy::_start_time = 0;
59 int CompilationPolicy::_c1_count = 0;
60 int CompilationPolicy::_c2_count = 0;
61 int CompilationPolicy::_c3_count = 0;
62 int CompilationPolicy::_sc_count = 0;
63 double CompilationPolicy::_increase_threshold_at_ratio = 0;
64
65 CompilationPolicy::LoadAverage CompilationPolicy::_load_average;
66 CompilationPolicy::TrainingReplayQueue CompilationPolicy::_training_replay_queue;
67 volatile bool CompilationPolicy::_recompilation_done = false;
68
69 void compilationPolicy_init() {
70 CompilationPolicy::initialize();
71 }
72
73 int CompilationPolicy::compiler_count(CompLevel comp_level) {
74 if (is_c1_compile(comp_level)) {
75 return c1_count();
76 } else if (is_c2_compile(comp_level)) {
77 return c2_count();
78 }
79 return 0;
80 }
81
82 void CompilationPolicy::sample_load_average() {
83 const int c2_queue_size = CompileBroker::queue_size(CompLevel_full_optimization);
84 _load_average.sample(c2_queue_size);
85 }
86
87 bool CompilationPolicy::have_recompilation_work() {
88 if (UseRecompilation && TrainingData::have_data() && RecompilationSchedule::have_schedule() &&
89 RecompilationSchedule::length() > 0 && !_recompilation_done) {
90 if (_load_average.value() <= RecompilationLoadAverageThreshold) {
91 return true;
92 }
93 }
94 return false;
95 }
96
97 bool CompilationPolicy::recompilation_step(int step, TRAPS) {
98 if (!have_recompilation_work() || os::elapsedTime() < DelayRecompilation) {
99 return false;
100 }
101
102 const int size = RecompilationSchedule::length();
103 int i = 0;
104 int count = 0;
105 bool repeat = false;
106 for (; i < size && count < step; i++) {
107 if (!RecompilationSchedule::status_at(i)) {
108 MethodTrainingData* mtd = RecompilationSchedule::at(i);
109 if (!mtd->has_holder()) {
110 RecompilationSchedule::set_status_at(i, true);
111 continue;
112 }
113 const Method* method = mtd->holder();
114 InstanceKlass* klass = method->method_holder();
115 if (klass->is_not_initialized()) {
116 repeat = true;
117 continue;
118 }
119 nmethod *nm = method->code();
120 if (nm == nullptr) {
121 repeat = true;
122 continue;
123 }
124
125 if (!ForceRecompilation && !(nm->is_scc() && nm->comp_level() == CompLevel_full_optimization)) {
126 // If it's already online-compiled at level 4, mark it as done.
127 if (nm->comp_level() == CompLevel_full_optimization) {
128 RecompilationSchedule::set_status_at(i, true);
129 } else {
130 repeat = true;
131 }
132 continue;
133 }
134 if (RecompilationSchedule::claim_at(i)) {
135 const methodHandle m(THREAD, const_cast<Method*>(method));
136 CompLevel next_level = CompLevel_full_optimization;
137
138 if (method->method_data() == nullptr) {
139 create_mdo(m, THREAD);
140 }
141
142 if (PrintTieredEvents) {
143 print_event(FORCE_RECOMPILE, m(), m(), InvocationEntryBci, next_level);
144 }
145 CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_full_optimization, methodHandle(), 0,
146 true /*requires_online_compilation*/, CompileTask::Reason_MustBeCompiled, THREAD);
147 if (HAS_PENDING_EXCEPTION) {
148 CLEAR_PENDING_EXCEPTION;
149 }
150 count++;
151 }
152 }
153 }
154
155 if (i == size && !repeat) {
156 Atomic::release_store(&_recompilation_done, true);
157 }
158 return count > 0;
159 }
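// Illustrative: with a recorded schedule of 100 methods and step = 10 (hypothetical
// numbers), each call claims and resubmits at most 10 not-yet-recompiled entries for
// a forced level-4 compile; entries whose holders are still uninitialized, or that
// have no compiled code yet, set 'repeat' and are retried on a later call.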
160
161 // Returns true if m must be compiled before executing it
162 // This is intended to force compiles for methods (usually for
163 // debugging) that would otherwise be interpreted for some reason.
164 bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
165 // Don't allow Xcomp to cause compiles in replay mode
166 if (ReplayCompiles) return false;
167
168 if (m->has_compiled_code()) return false; // already compiled
169 if (!can_be_compiled(m, comp_level)) return false;
170
171 return !UseInterpreter || // must compile all methods
172 (AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
173 }
174
175 void CompilationPolicy::maybe_compile_early(const methodHandle& m, TRAPS) {
176 if (m->method_holder()->is_not_initialized()) {
177 // 'is_not_initialized' means not only '!is_initialized', but also that
178 // initialization has not been started yet ('!being_initialized')
179 // Do not force compilation of methods in uninitialized classes.
180 return;
181 }
182 if (!m->is_native() && MethodTrainingData::have_data()) {
183 MethodTrainingData* mtd = MethodTrainingData::find(m);
184 if (mtd == nullptr) {
185 return; // there is no training data recorded for m
186 }
187 bool recompile = m->code_has_clinit_barriers();
188 CompLevel cur_level = static_cast<CompLevel>(m->highest_comp_level());
189 CompLevel next_level = trained_transition(m, cur_level, mtd, THREAD);
190 if ((next_level != cur_level || recompile) && can_be_compiled(m, next_level) && !CompileBroker::compilation_is_in_queue(m)) {
191 bool requires_online_compilation = false;
192 CompileTrainingData* ctd = mtd->last_toplevel_compile(next_level);
193 if (ctd != nullptr) {
194 requires_online_compilation = (ctd->init_deps_left() > 0);
195 }
196 if (requires_online_compilation && recompile) {
197 return;
198 }
199 if (PrintTieredEvents) {
200 print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, next_level);
201 }
202 CompileBroker::compile_method(m, InvocationEntryBci, next_level, methodHandle(), 0, requires_online_compilation, CompileTask::Reason_MustBeCompiled, THREAD);
203 if (HAS_PENDING_EXCEPTION) {
204 CLEAR_PENDING_EXCEPTION;
205 }
206 }
207 }
208 }
209
210 void CompilationPolicy::maybe_compile_early_after_init(const methodHandle& m, TRAPS) {
211 assert(m->method_holder()->is_initialized(), "Should be called after class initialization");
212 maybe_compile_early(m, THREAD);
213 }
214
215 void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
216 if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
217 // don't force compilation, resolve was on behalf of compiler
218 return;
219 }
220 if (m->method_holder()->is_not_initialized()) {
221 // 'is_not_initialized' means not only '!is_initialized', but also that
222 // initialization has not been started yet ('!being_initialized')
223 // Do not force compilation of methods in uninitialized classes.
224     // Note that forcing compilation here would trip an assert later,
225 // in CompileBroker::compile_method.
226 // We sometimes use the link resolver to do reflective lookups
227 // even before classes are initialized.
228 return;
229 }
230
231 if (must_be_compiled(m)) {
232 // This path is unusual, mostly used by the '-Xcomp' stress test mode.
233 CompLevel level = initial_compile_level(m);
234 if (PrintTieredEvents) {
235 print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, level);
236 }
237 CompileBroker::compile_method(m, InvocationEntryBci, level, methodHandle(), 0, false, CompileTask::Reason_MustBeCompiled, THREAD);
238 }
239 }
240
241 void CompilationPolicy::replay_training_at_init_impl(InstanceKlass* klass, TRAPS) {
242 if (!klass->has_init_deps_processed()) {
243 ResourceMark rm;
244 log_debug(training)("Replay training: %s", klass->external_name());
245
246 KlassTrainingData* ktd = KlassTrainingData::find(klass);
247 if (ktd != nullptr) {
248 guarantee(ktd->has_holder(), "");
249 ktd->notice_fully_initialized(); // sets klass->has_init_deps_processed bit
250 assert(klass->has_init_deps_processed(), "");
251
252 ktd->iterate_all_comp_deps([&](CompileTrainingData* ctd) {
253 if (ctd->init_deps_left() == 0) {
254 MethodTrainingData* mtd = ctd->method();
255 if (mtd->has_holder()) {
256 const methodHandle mh(THREAD, const_cast<Method*>(mtd->holder()));
257 CompilationPolicy::maybe_compile_early(mh, THREAD);
258 }
259 }
260 });
261 }
262 Array<Method*>* methods = klass->methods();
263 for (int i = 0; i < methods->length(); i++) {
264 const methodHandle mh(THREAD, methods->at(i));
265 CompilationPolicy::maybe_compile_early_after_init(mh, THREAD);
266 }
267 }
268 }
269
270 void CompilationPolicy::replay_training_at_init(bool is_on_shutdown, TRAPS) {
271 // Drain pending queue when no concurrent processing thread is present.
272 if (UseConcurrentTrainingReplay) {
273 if (VerifyTrainingData) {
274 MonitorLocker locker(THREAD, TrainingReplayQueue_lock);
275 while (!_training_replay_queue.is_empty_unlocked()) {
276 locker.wait(); // let the replay training thread drain the queue
277 }
278 }
279 } else {
280 do {
281 InstanceKlass* pending = _training_replay_queue.try_pop(TrainingReplayQueue_lock, THREAD);
282 if (pending == nullptr) {
283 break; // drained the queue
284 }
285 if (is_on_shutdown) {
286 LogStreamHandle(Warning, training) log;
287 if (log.is_enabled()) {
288 ResourceMark rm;
289 log.print("pending training replay request: %s%s",
290 pending->external_name(), (pending->has_preinitialized_mirror() ? " (preinitialized)" : ""));
291 }
292 }
293 replay_training_at_init_impl(pending, THREAD);
294 } while (true);
295 }
296
297 if (VerifyTrainingData) {
298 TrainingData::verify();
299 }
300 }
301
302 void CompilationPolicy::replay_training_at_init(InstanceKlass* klass, TRAPS) {
303 assert(klass->is_initialized(), "");
304 if (TrainingData::have_data() && klass->is_shared() &&
305 (CompileBroker::replay_initialized() || !klass->has_preinitialized_mirror())) { // ignore preloaded classes during early startup
306 if (UseConcurrentTrainingReplay || !CompileBroker::replay_initialized()) {
307 _training_replay_queue.push(klass, TrainingReplayQueue_lock, THREAD);
308 } else {
309 replay_training_at_init_impl(klass, THREAD);
310 }
311 assert(!HAS_PENDING_EXCEPTION, "");
312 }
313 }
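// Routing example: when a concurrent replay thread is in use, or replay has not been
// initialized yet, the klass is merely queued; otherwise its training data is
// replayed synchronously on the current thread.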
314
315 // For TrainingReplayQueue
316 template<>
317 void CompilationPolicyUtils::Queue<InstanceKlass>::print_on(outputStream* st) {
318 int pos = 0;
319 for (QueueNode* cur = _head; cur != nullptr; cur = cur->next()) {
320 ResourceMark rm;
321 InstanceKlass* ik = cur->value();
322 st->print_cr("%3d: " INTPTR_FORMAT " %s", ++pos, p2i(ik), ik->external_name());
323 }
324 }
325
326 void CompilationPolicy::replay_training_at_init_loop(TRAPS) {
327 precond(UseConcurrentTrainingReplay);
328
329 while (!CompileBroker::is_compilation_disabled_forever() || VerifyTrainingData) {
330 InstanceKlass* ik = _training_replay_queue.pop(TrainingReplayQueue_lock, THREAD);
331 replay_training_at_init_impl(ik, THREAD);
332 }
333 }
334
335 static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_level) {
336 if (comp_level == CompLevel_any) {
337 if (CompilerConfig::is_c1_only()) {
338 comp_level = CompLevel_simple;
339 } else if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
340 comp_level = CompLevel_full_optimization;
341 }
342 }
343 return comp_level;
344 }
345
346 // Returns true if m is allowed to be compiled
347 bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
348 // allow any levels for WhiteBox
349 assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level %d", comp_level);
350
351 if (m->is_abstract()) return false;
352 if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
353
354 // Math intrinsics should never be compiled as this can lead to
355 // monotonicity problems because the interpreter will prefer the
356 // compiled code to the intrinsic version. This can't happen in
357 // production because the invocation counter can't be incremented
358 // but we shouldn't expose the system to this problem in testing
359 // modes.
360 if (!AbstractInterpreter::can_be_compiled(m)) {
361 return false;
362 }
363 comp_level = adjust_level_for_compilability_query((CompLevel) comp_level);
364 if (comp_level == CompLevel_any || is_compile(comp_level)) {
365 return !m->is_not_compilable(comp_level);
366 }
367 return false;
368 }
369
409 #endif
410 return compile_queue->first();
411 }
412
413 // Simple methods are just as good compiled with C1 as with C2.
414 // Determine if a given method is such a case.
415 bool CompilationPolicy::is_trivial(const methodHandle& method) {
416 if (method->is_accessor() ||
417 method->is_constant_getter()) {
418 return true;
419 }
420 return false;
421 }
422
423 bool CompilationPolicy::force_comp_at_level_simple(const methodHandle& method) {
424 if (CompilationModeFlag::quick_internal()) {
425 #if INCLUDE_JVMCI
426 if (UseJVMCICompiler) {
427 AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
428 if (comp != nullptr && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
429 return !SCCache::is_C3_on();
430 }
431 }
432 #endif
433 }
434 return false;
435 }
436
437 CompLevel CompilationPolicy::comp_level(Method* method) {
438 nmethod *nm = method->code();
439 if (nm != nullptr && nm->is_in_use()) {
440 return (CompLevel)nm->comp_level();
441 }
442 return CompLevel_none;
443 }
444
445 // Call and loop predicates determine whether a transition to a higher
446 // compilation level should be performed (pointers to predicate functions
447 // are passed to common()).
448 // Tier?LoadFeedback is basically a coefficient that determines
449 // how many methods per compiler thread can be in the queue before
529 int comp_count = compiler_count(level);
530 if (comp_count > 0) {
531 double queue_size = CompileBroker::queue_size(level);
532 double k = (double)queue_size / ((double)feedback_k * (double)comp_count) + 1;
533
534 // Increase C1 compile threshold when the code cache is filled more
535 // than specified by IncreaseFirstTierCompileThresholdAt percentage.
536 // The main intention is to keep enough free space for C2 compiled code
537 // to achieve peak performance if the code cache is under stress.
538 if (CompilerConfig::is_tiered() && !CompilationModeFlag::disable_intermediate() && is_c1_compile(level)) {
539 double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
540 if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
541 k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
542 }
543 }
544 return k;
545 }
546 return 1;
547 }
548
549 void CompilationPolicy::print_counters(const char* prefix, Method* m) {
550 int invocation_count = m->invocation_count();
551 int backedge_count = m->backedge_count();
552 MethodData* mdh = m->method_data();
553 int mdo_invocations = 0, mdo_backedges = 0;
554 int mdo_invocations_start = 0, mdo_backedges_start = 0;
555 if (mdh != nullptr) {
556 mdo_invocations = mdh->invocation_count();
557 mdo_backedges = mdh->backedge_count();
558 mdo_invocations_start = mdh->invocation_count_start();
559 mdo_backedges_start = mdh->backedge_count_start();
560 }
561 tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
562 invocation_count, backedge_count, prefix,
563 mdo_invocations, mdo_invocations_start,
564 mdo_backedges, mdo_backedges_start);
565 tty->print(" %smax levels=%d,%d", prefix,
566 m->highest_comp_level(), m->highest_osr_comp_level());
567 }
568
569 void CompilationPolicy::print_training_data(const char* prefix, Method* method) {
570 methodHandle m(Thread::current(), method);
571 tty->print(" %smtd: ", prefix);
572 MethodTrainingData* mtd = MethodTrainingData::find(m);
573 if (mtd == nullptr) {
574 tty->print("null");
575 } else {
576 MethodData* md = mtd->final_profile();
577 tty->print("mdo=");
578 if (md == nullptr) {
579 tty->print("null");
580 } else {
581 int mdo_invocations = md->invocation_count();
582 int mdo_backedges = md->backedge_count();
583 int mdo_invocations_start = md->invocation_count_start();
584 int mdo_backedges_start = md->backedge_count_start();
585 tty->print("%d(%d), %d(%d)", mdo_invocations, mdo_invocations_start, mdo_backedges, mdo_backedges_start);
586 }
587 CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
588 tty->print(", deps=");
589 if (ctd == nullptr) {
590 tty->print("null");
591 } else {
592 tty->print("%d", ctd->init_deps_left());
593 }
594 }
595 }
596
597 // Print an event.
598 void CompilationPolicy::print_event(EventType type, Method* m, Method* im, int bci, CompLevel level) {
599 bool inlinee_event = m != im;
600
601 ttyLocker tty_lock;
602 tty->print("%lf: [", os::elapsedTime());
603
604 switch(type) {
605 case CALL:
606 tty->print("call");
607 break;
608 case LOOP:
609 tty->print("loop");
610 break;
611 case COMPILE:
612 tty->print("compile");
613 break;
614 case FORCE_COMPILE:
615 tty->print("force-compile");
616 break;
617 case FORCE_RECOMPILE:
618 tty->print("force-recompile");
619 break;
620 case REMOVE_FROM_QUEUE:
621 tty->print("remove-from-queue");
622 break;
623 case UPDATE_IN_QUEUE:
624 tty->print("update-in-queue");
625 break;
626 case REPROFILE:
627 tty->print("reprofile");
628 break;
629 case MAKE_NOT_ENTRANT:
630 tty->print("make-not-entrant");
631 break;
632 default:
633 tty->print("unknown");
634 }
635
636 tty->print(" level=%d ", level);
637
638 ResourceMark rm;
639 char *method_name = m->name_and_sig_as_C_string();
640 tty->print("[%s", method_name);
641 if (inlinee_event) {
642 char *inlinee_name = im->name_and_sig_as_C_string();
643 tty->print(" [%s]] ", inlinee_name);
644 }
645 else tty->print("] ");
646 tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
647 CompileBroker::queue_size(CompLevel_full_optimization));
648
649 tty->print(" rate=");
650 if (m->prev_time() == 0) tty->print("n/a");
651 else tty->print("%f", m->rate());
652 tty->print(" load=%lf", _load_average.value());
653
654 tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
655 threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
656
657 if (type != COMPILE) {
658 print_counters("", m);
659 if (inlinee_event) {
660 print_counters("inlinee ", im);
661 }
662 tty->print(" compilable=");
663 bool need_comma = false;
664 if (!m->is_not_compilable(CompLevel_full_profile)) {
665 tty->print("c1");
666 need_comma = true;
667 }
668 if (!m->is_not_osr_compilable(CompLevel_full_profile)) {
669 if (need_comma) tty->print(",");
670 tty->print("c1-osr");
671 need_comma = true;
672 }
673 if (!m->is_not_compilable(CompLevel_full_optimization)) {
674 if (need_comma) tty->print(",");
675 tty->print("c2");
676 need_comma = true;
677 }
678 if (!m->is_not_osr_compilable(CompLevel_full_optimization)) {
679 if (need_comma) tty->print(",");
680 tty->print("c2-osr");
681 }
682 tty->print(" status=");
683 if (m->queued_for_compilation()) {
684 tty->print("in-queue");
685 } else tty->print("idle");
686 print_training_data("", m);
687 if (inlinee_event) {
688 print_training_data("inlinee ", im);
689 }
690 }
691 tty->print_cr("]");
692 }
693
694 void CompilationPolicy::initialize() {
695 if (!CompilerConfig::is_interpreter_only()) {
696 int count = CICompilerCount;
697 bool c1_only = CompilerConfig::is_c1_only();
698 bool c2_only = CompilerConfig::is_c2_or_jvmci_compiler_only();
699
700 #ifdef _LP64
701 // Turn on ergonomic compiler count selection
702 if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
703 FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
704 }
705 if (CICompilerCountPerCPU) {
706 // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
707 int log_cpu = log2i(os::active_processor_count());
708 int loglog_cpu = log2i(MAX2(log_cpu, 1));
709 count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
733 // available cores can result in the exhaustion of the address space
734 // available to the VM and thus cause the VM to crash.
735 if (FLAG_IS_DEFAULT(CICompilerCount)) {
736 count = 3;
737 FLAG_SET_ERGO(CICompilerCount, count);
738 }
739 #endif
740
741 if (c1_only) {
742 // No C2 compiler thread required
743 set_c1_count(count);
744 } else if (c2_only) {
745 set_c2_count(count);
746 } else {
747 #if INCLUDE_JVMCI
748 if (UseJVMCICompiler && UseJVMCINativeLibrary) {
749 int libjvmci_count = MAX2((int) (count * JVMCINativeLibraryThreadFraction), 1);
750 int c1_count = MAX2(count - libjvmci_count, 1);
751 set_c2_count(libjvmci_count);
752 set_c1_count(c1_count);
753 } else if (SCCache::is_C3_on()) {
754 set_c1_count(MAX2(count / 3, 1));
755 set_c2_count(MAX2(count - c1_count(), 1));
756 set_c3_count(1);
757 } else
758 #endif
759 {
760 set_c1_count(MAX2(count / 3, 1));
761 set_c2_count(MAX2(count - c1_count(), 1));
762 }
763 }
764 if (SCCache::is_code_load_thread_on()) {
765 set_sc_count((c1_only || c2_only) ? 1 : 2); // At minimum we need 2 threads to load C1 and C2 cached code in parallel
766 }
767 assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
768 set_increase_threshold_at_ratio();
769 }
770
771 set_start_time(nanos_to_millis(os::javaTimeNanos()));
772 }
773
774
775
776
777 #ifdef ASSERT
778 bool CompilationPolicy::verify_level(CompLevel level) {
779 if (TieredCompilation && level > TieredStopAtLevel) {
780 return false;
781 }
782 // Check if there is a compiler to process the requested level
783 if (!CompilerConfig::is_c1_enabled() && is_c1_compile(level)) {
784 return false;
785 }
786 if (!CompilerConfig::is_c2_or_jvmci_compiler_enabled() && is_c2_compile(level)) {
787 return false;
788 }
789
790 // Interpreter level is always valid.
791 if (level == CompLevel_none) {
792 return true;
793 }
794 if (CompilationModeFlag::normal()) {
795 return true;
796 } else if (CompilationModeFlag::quick_only()) {
873 }
874 assert(level != CompLevel_any, "Unhandled compilation mode");
875 return limit_level(level);
876 }
877
878 // Set carry flags on the counters if necessary
879 void CompilationPolicy::handle_counter_overflow(const methodHandle& method) {
880 MethodCounters *mcs = method->method_counters();
881 if (mcs != nullptr) {
882 mcs->invocation_counter()->set_carry_on_overflow();
883 mcs->backedge_counter()->set_carry_on_overflow();
884 }
885 MethodData* mdo = method->method_data();
886 if (mdo != nullptr) {
887 mdo->invocation_counter()->set_carry_on_overflow();
888 mdo->backedge_counter()->set_carry_on_overflow();
889 }
890 }
891
892 // Called with the queue locked and with at least one element
893 CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThread* THREAD) {
894 CompileTask *max_blocking_task = nullptr;
895 CompileTask *max_task = nullptr;
896 Method* max_method = nullptr;
897
898 int64_t t = nanos_to_millis(os::javaTimeNanos());
899 // Iterate through the queue and find a method with a maximum rate.
900 for (CompileTask* task = compile_queue->first(); task != nullptr;) {
901 CompileTask* next_task = task->next();
902 // If a method was unloaded or has been stale for some time, remove it from the queue.
    // Blocking tasks and tasks submitted from the whitebox API don't become stale.
904 if (task->is_unloaded()) {
905 compile_queue->remove_and_mark_stale(task);
906 task = next_task;
907 continue;
908 }
909 if (task->is_scc()) {
      // SCC tasks are on a separate queue, and they should load fast. There is no need to walk
911 // the rest of the queue, just take the task and go.
912 return task;
913 }
914 Method* method = task->method();
915 methodHandle mh(THREAD, method);
916 if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, mh) && !is_old(mh)) {
917 if (PrintTieredEvents) {
918 print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
919 }
920 method->clear_queued_for_compilation();
921 method->set_pending_queue_processed(false);
922 compile_queue->remove_and_mark_stale(task);
923 task = next_task;
924 continue;
925 }
926 update_rate(t, mh);
927 if (max_task == nullptr || compare_methods(method, max_method) || compare_tasks(task, max_task)) {
928 // Select a method with the highest rate
929 max_task = task;
930 max_method = method;
931 }
932
933 if (task->is_blocking()) {
934 if (max_blocking_task == nullptr || compare_methods(method, max_blocking_task->method())) {
935 max_blocking_task = task;
936 }
937 }
938
939 task = next_task;
940 }
941
942 if (max_blocking_task != nullptr) {
943 // In blocking compilation mode, the CompileBroker will make
944 // compilations submitted by a JVMCI compiler thread non-blocking. These
945 // compilations should be scheduled after all blocking compilations
946 // to service non-compiler related compilations sooner and reduce the
947 // chance of such compilations timing out.
948 max_task = max_blocking_task;
949 max_method = max_task->method();
950 }
951
952 methodHandle max_method_h(THREAD, max_method);
953
954 if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
955 max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
956 max_task->set_comp_level(CompLevel_limited_profile);
957
958 if (CompileBroker::compilation_is_complete(max_method_h(), max_task->osr_bci(), CompLevel_limited_profile,
959 false /* requires_online_compilation */,
960 CompileTask::Reason_None)) {
961 if (PrintTieredEvents) {
962 print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
963 }
964 compile_queue->remove_and_mark_stale(max_task);
965 max_method->clear_queued_for_compilation();
966 return nullptr;
967 }
968
969 if (PrintTieredEvents) {
970 print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
971 }
972 }
973 return max_task;
974 }
975
976 void CompilationPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
977 for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
978 if (PrintTieredEvents) {
979 print_event(REPROFILE, sd->method(), sd->method(), InvocationEntryBci, CompLevel_none);
980 }
981 MethodData* mdo = sd->method()->method_data();
982 if (mdo != nullptr) {
983 mdo->reset_start_counters();
984 }
985 if (sd->is_top()) break;
986 }
987 }
988
989 nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle& inlinee,
990 int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
991 if (PrintTieredEvents) {
992 print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
993 }
994
995 #if INCLUDE_JVMCI
996 if (EnableJVMCI && UseJVMCICompiler &&
997 comp_level == CompLevel_full_optimization && !AOTLinkedClassBulkLoader::class_preloading_finished()) {
998 return nullptr;
999 }
1000 #endif
1001
1002 if (comp_level == CompLevel_none &&
1003 JvmtiExport::can_post_interpreter_events() &&
1004 THREAD->is_interp_only_mode()) {
1005 return nullptr;
1006 }
1007 if (ReplayCompiles) {
1008 // Don't trigger other compiles in testing mode
1009 return nullptr;
1010 }
1011
1012 handle_counter_overflow(method);
1013 if (method() != inlinee()) {
1014 handle_counter_overflow(inlinee);
1015 }
1016
1017 if (bci == InvocationEntryBci) {
1018 method_invocation_event(method, inlinee, comp_level, nm, THREAD);
1019 } else {
1020 // method == inlinee if the event originated in the main method
1021 method_back_branch_event(method, inlinee, bci, comp_level, nm, THREAD);
1077 if (level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) {
1078 nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
1079 if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) {
1080 // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
1081 osr_nm->make_not_entrant();
1082 }
1083 compile(mh, bci, CompLevel_simple, THREAD);
1084 }
1085 return;
1086 }
1087 }
1088 if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
1089 return;
1090 }
1091 if (!CompileBroker::compilation_is_in_queue(mh)) {
1092 if (PrintTieredEvents) {
1093 print_event(COMPILE, mh(), mh(), bci, level);
1094 }
1095 int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
1096 update_rate(nanos_to_millis(os::javaTimeNanos()), mh);
1097 bool requires_online_compilation = false;
1098 if (TrainingData::have_data()) {
1099 MethodTrainingData* mtd = MethodTrainingData::find(mh);
1100 if (mtd != nullptr) {
1101 CompileTrainingData* ctd = mtd->last_toplevel_compile(level);
1102 if (ctd != nullptr) {
1103 requires_online_compilation = (ctd->init_deps_left() > 0);
1104 }
1105 }
1106 }
1107 CompileBroker::compile_method(mh, bci, level, mh, hot_count, requires_online_compilation, CompileTask::Reason_Tiered, THREAD);
1108 }
1109 }
1110
1111 // update_rate() is called from select_task() while holding a compile queue lock.
1112 void CompilationPolicy::update_rate(int64_t t, const methodHandle& method) {
1113 // Skip update if counters are absent.
1114 // Can't allocate them since we are holding compile queue lock.
1115 if (method->method_counters() == nullptr) return;
1116
1117 if (is_old(method)) {
1118 // We don't remove old methods from the queue,
1119 // so we can just zero the rate.
1120 method->set_rate(0);
1121 return;
1122 }
1123
  // We don't update the rate if we've just come out of a safepoint.
1125 // delta_s is the time since last safepoint in milliseconds.
1126 int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
1127 int64_t delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement
1128 // How many events were there since the last time?
1129 int event_count = method->invocation_count() + method->backedge_count();
1130 int delta_e = event_count - method->prev_event_count();
1131
  // At least TieredRateUpdateMinTime ms must have passed since the last safepoint.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And the previous sample must have been taken at least TieredRateUpdateMinTime ms ago.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
1136 method->set_prev_time(t);
1137 method->set_prev_event_count(event_count);
1138 method->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
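      // Illustrative: delta_e = 200 new events over delta_t = 20ms gives a
      // rate of 200 / 20 = 10 events/ms.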
1139 } else {
1140 if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for TieredRateUpdateMaxTime ms, zero the rate. Don't modify prev values.
1142 method->set_rate(0);
1143 }
1144 }
1145 }
1146 }
1147
1148 // Check if this method has been stale for a given number of milliseconds.
1149 // See select_task().
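// (Note: a task counts as stale only if the timeout elapsed both since the
// last rate sample and since the last safepoint, and the method saw no new
// invocation or backedge events in that window.)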
1150 bool CompilationPolicy::is_stale(int64_t t, int64_t timeout, const methodHandle& method) {
1151 int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
1152 int64_t delta_t = t - method->prev_time();
1153 if (delta_t > timeout && delta_s > timeout) {
1154 int event_count = method->invocation_count() + method->backedge_count();
1155 int delta_e = event_count - method->prev_event_count();
1156 // Return true if there were no events.
1157 return delta_e == 0;
1158 }
1159 return false;
1160 }
1161
1162 // We don't remove old methods from the compile queue even if they have
1163 // very low activity. See select_task().
1164 bool CompilationPolicy::is_old(const methodHandle& method) {
1165 int i = method->invocation_count();
1166 int b = method->backedge_count();
1167 double k = TieredOldPercentage / 100.0;
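  // Illustrative: with TieredOldPercentage = 1000 the predicates are scaled by
  // 10x, i.e. a method counts as old once its counters are roughly 10x beyond
  // the normal tier-up thresholds.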
1168
1169 return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
1170 }
1171
1172 double CompilationPolicy::weight(Method* method) {
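  // Illustrative: rate = 50 events/ms, 1000 invocations and 100 backedges give
  // (50+1) * (1000+1) * (100+1) ~= 5.2e6; the +1 terms keep a zero count from
  // zeroing out the whole product.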
1173 return (double)(method->rate() + 1) * (method->invocation_count() + 1) * (method->backedge_count() + 1);
1174 }
1175
1176 // Apply heuristics and return true if x should be compiled before y
1177 bool CompilationPolicy::compare_methods(Method* x, Method* y) {
1178 if (x->highest_comp_level() > y->highest_comp_level()) {
1179 // recompilation after deopt
1180 return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
1183 if (weight(x) > weight(y)) {
1184 return true;
1185 }
1186 }
1187 return false;
1188 }
1189
1190 bool CompilationPolicy::compare_tasks(CompileTask* x, CompileTask* y) {
1191 assert(!x->is_scc() && !y->is_scc(), "SC tasks are not expected here");
1192 if (x->compile_reason() != y->compile_reason() && y->compile_reason() == CompileTask::Reason_MustBeCompiled) {
1193 return true;
1194 }
1195 return false;
1196 }
1197
1198 // Is method profiled enough?
1199 bool CompilationPolicy::is_method_profiled(const methodHandle& method) {
1200 MethodData* mdo = method->method_data();
1201 if (mdo != nullptr) {
1202 int i = mdo->invocation_count_delta();
1203 int b = mdo->backedge_count_delta();
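    // The deltas count events since the last profile restart (see reprofile()),
    // so a method that deopted and resumed profiling is measured afresh.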
1204 return CallPredicate::apply_scaled(method, CompLevel_full_profile, i, b, 1);
1205 }
1206 return false;
1207 }
1208
1209
// Determine if a method is mature.
1211 bool CompilationPolicy::is_mature(MethodData* mdo) {
1212 if (Arguments::is_compiler_only()) {
1213 // Always report profiles as immature with -Xcomp
1214 return false;
1215 }
  if (mdo != nullptr) {
    methodHandle mh(Thread::current(), mdo->method());
1218 int i = mdo->invocation_count();
1219 int b = mdo->backedge_count();
1220 double k = ProfileMaturityPercentage / 100.0;
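    // Illustrative: ProfileMaturityPercentage = 20 treats the profile as mature
    // once the counters reach ~20% of the usual tier-up thresholds.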
1221 return CallPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k) || LoopPredicate::apply_scaled(mh, CompLevel_full_profile, i, b, k);
1222 }
1223 return false;
1224 }
1225
1226 // If a method is old enough and is still in the interpreter we would want to
1227 // start profiling without waiting for the compiled method to arrive.
// We also take the load on the compilers into account.
1229 bool CompilationPolicy::should_create_mdo(const methodHandle& method, CompLevel cur_level) {
1230 if (cur_level != CompLevel_none || force_comp_at_level_simple(method) || CompilationModeFlag::quick_only() || !ProfileInterpreter) {
1231 return false;
1232 }
1233
1234 if (TrainingData::have_data()) {
1235 MethodTrainingData* mtd = MethodTrainingData::find(method);
1236 if (mtd != nullptr && mtd->saw_level(CompLevel_full_optimization)) {
1237 return true;
1238 }
1239 return false;
1240 }
1241
1242 if (is_old(method)) {
1243 return true;
1244 }
1245
1246 int i = method->invocation_count();
1247 int b = method->backedge_count();
1248 double k = Tier0ProfilingStartPercentage / 100.0;
1249
1250 // If the top level compiler is not keeping up, delay profiling.
1251 if (CompileBroker::queue_size(CompLevel_full_optimization) <= Tier0Delay * compiler_count(CompLevel_full_optimization)) {
1252 return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
1253 }
1254 return false;
1255 }
1256
1257 // Inlining control: if we're compiling a profiled method with C1 and the callee
1258 // is known to have OSRed in a C2 version, don't inline it.
1259 bool CompilationPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
1260 CompLevel comp_level = (CompLevel)env->comp_level();
1261 if (comp_level == CompLevel_full_profile ||
1262 comp_level == CompLevel_limited_profile) {
1263 return callee->highest_osr_comp_level() == CompLevel_full_optimization;
1264 }
1265 return false;
1266 }
1267
1268 // Create MDO if necessary.
1269 void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
1270 if (mh->is_native() ||
1271 mh->is_abstract() ||
1272 mh->is_accessor() ||
1273 mh->is_constant_getter()) {
1274 return;
1275 }
1276 if (mh->method_data() == nullptr) {
1277 Method::build_profiling_method_data(mh, CHECK_AND_CLEAR);
1278 }
1279 if (ProfileInterpreter && THREAD->has_last_Java_frame()) {
1280 MethodData* mdo = mh->method_data();
1281 if (mdo != nullptr) {
1282 frame last_frame = THREAD->last_frame();
1283 if (last_frame.is_interpreted_frame() && mh == last_frame.interpreter_frame_method()) {
1284 int bci = last_frame.interpreter_frame_bci();
1285 address dp = mdo->bci_to_dp(bci);
1286 last_frame.interpreter_frame_set_mdp(dp);
1287 }
1288 }
1289 }
1290 }
1291
1292 CompLevel CompilationPolicy::trained_transition_from_none(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1293 precond(mtd != nullptr);
1294 precond(cur_level == CompLevel_none);
1295
1296 if (mtd->only_inlined() && !mtd->saw_level(CompLevel_full_optimization)) {
1297 return CompLevel_none;
1298 }
1299
1300 bool training_has_profile = (mtd->final_profile() != nullptr);
1301 if (mtd->saw_level(CompLevel_full_optimization) && !training_has_profile) {
1302 return CompLevel_full_profile;
1303 }
1304
1305 CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
1306 switch (highest_training_level) {
1307 case CompLevel_limited_profile:
1308 case CompLevel_full_profile:
1309 return CompLevel_limited_profile;
1310 case CompLevel_simple:
1311 return CompLevel_simple;
1312 case CompLevel_none:
1313 return CompLevel_none;
1314 default:
1315 break;
1316 }
1317
1318 // Now handle the case of level 4.
1319 assert(highest_training_level == CompLevel_full_optimization, "Unexpected compilation level: %d", highest_training_level);
1320 if (!training_has_profile) {
    // The method was part of a level 4 compile, but we don't have a stored
    // profile, so we need to profile it.
1323 return CompLevel_full_profile;
1324 }
1325 const bool deopt = (static_cast<CompLevel>(method->highest_comp_level()) == CompLevel_full_optimization);
1326 // If we deopted, then we reprofile
1327 if (deopt && !is_method_profiled(method)) {
1328 return CompLevel_full_profile;
1329 }
1330
1331 CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
1332 assert(ctd != nullptr, "Should have CTD for CompLevel_full_optimization");
1333 // With SkipTier2IfPossible and all deps satisfied, go to level 4 immediately
1334 if (SkipTier2IfPossible && ctd->init_deps_left() == 0) {
1335 if (method->method_data() == nullptr) {
1336 create_mdo(method, THREAD);
1337 }
1338 return CompLevel_full_optimization;
1339 }
1340
1341 // Otherwise go to level 2
1342 return CompLevel_limited_profile;
1343 }
1344
1345
1346 CompLevel CompilationPolicy::trained_transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1347 precond(mtd != nullptr);
1348 precond(cur_level == CompLevel_limited_profile);
1349
1350 // One of the main reasons that we can get here is that we're waiting for the stored C2 code to become ready.
1351
1352 // But first, check if we have a saved profile
1353 bool training_has_profile = (mtd->final_profile() != nullptr);
1354 if (!training_has_profile) {
1355 return CompLevel_full_profile;
1356 }
1357
1359 assert(training_has_profile, "Have to have a profile to be here");
1360 // Check if the method is ready
1361 CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
1362 if (ctd != nullptr && ctd->init_deps_left() == 0) {
1363 if (method->method_data() == nullptr) {
1364 create_mdo(method, THREAD);
1365 }
1366 return CompLevel_full_optimization;
1367 }
1368
1369 // Otherwise stay at the current level
1370 return CompLevel_limited_profile;
1371 }
1372
1373
1374 CompLevel CompilationPolicy::trained_transition_from_full_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1375 precond(mtd != nullptr);
1376 precond(cur_level == CompLevel_full_profile);
1377
1378 CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
  // We have a method at the full profile level, and we also know that it's possibly an important method.
1380 if (highest_training_level == CompLevel_full_optimization && !mtd->only_inlined()) {
1381 // Check if it is adequately profiled
1382 if (is_method_profiled(method)) {
1383 return CompLevel_full_optimization;
1384 }
1385 }
1386
1387 // Otherwise stay at the current level
1388 return CompLevel_full_profile;
1389 }
1390
1391 CompLevel CompilationPolicy::trained_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
1392 precond(MethodTrainingData::have_data());
1393
1394 // If there is no training data recorded for this method, bail out.
1395 if (mtd == nullptr) {
1396 return cur_level;
1397 }
1398
1399 CompLevel next_level = cur_level;
1400 switch(cur_level) {
1401 default: break;
1402 case CompLevel_none:
1403 next_level = trained_transition_from_none(method, cur_level, mtd, THREAD);
1404 break;
1405 case CompLevel_limited_profile:
1406 next_level = trained_transition_from_limited_profile(method, cur_level, mtd, THREAD);
1407 break;
1408 case CompLevel_full_profile:
1409 next_level = trained_transition_from_full_profile(method, cur_level, mtd, THREAD);
1410 break;
1411 }
1412
1413 // We don't have any special strategies for the C2-only compilation modes, so just fix up the levels for now.
1414 if (CompilationModeFlag::high_only_quick_internal() && CompLevel_simple < next_level && next_level < CompLevel_full_optimization) {
1415 return CompLevel_none;
1416 }
1417 if (CompilationModeFlag::high_only() && next_level < CompLevel_full_optimization) {
1418 return CompLevel_none;
1419 }
1420 return (cur_level != next_level) ? limit_level(next_level) : cur_level;
1421 }
1422
1423 /*
1424 * Method states:
1425 * 0 - interpreter (CompLevel_none)
1426 * 1 - pure C1 (CompLevel_simple)
1427 * 2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
1428 * 3 - C1 with full profiling (CompLevel_full_profile)
1429 * 4 - C2 or Graal (CompLevel_full_optimization)
1430 *
1431 * Common state transition patterns:
1432 * a. 0 -> 3 -> 4.
1433 * The most common path. But note that even in this straightforward case
1434 * profiling can start at level 0 and finish at level 3.
1435 *
1436 * b. 0 -> 2 -> 3 -> 4.
1437 * This case occurs when the load on C2 is deemed too high. So, instead of transitioning
1438 * into state 3 directly and over-profiling while a method is in the C2 queue we transition to
1439 * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
1440 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for level 3 compilation, but the C1 queue is long enough
 *    to enable the profiling to fully occur at level 0. In this case we change the compilation level
1444 * of the method to 2 while the request is still in-queue, because it'll allow it to run much faster
1445 * without full profiling while c2 is compiling.
1446 *
1447 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
1448 * After a method was once compiled with C1 it can be identified as trivial and be compiled to
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
1450 *
1451 * e. 0 -> 4.
1452 * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
1453 * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
1454 * the compiled version already exists).
1455 *
 * Note that since state 0 can be reached from any other state via deoptimization,
 * different loops are possible.
1458 *
1459 */
1460
1461 // Common transition function. Given a predicate determines if a method should transition to another level.
1462 template<typename Predicate>
1463 CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD, bool disable_feedback) {
  CompLevel next_level = cur_level;
1467
1468 if (force_comp_at_level_simple(method)) {
1469 next_level = CompLevel_simple;
1470 } else {
1471 if (MethodTrainingData::have_data()) {
1472 MethodTrainingData* mtd = MethodTrainingData::find(method);
1473 if (mtd == nullptr) {
        // We haven't seen compilations of this method in training. It's either very cold or the behavior changed.
1475 // Feed it to the standard TF with no profiling delay.
1476 next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
1477 } else {
1478 next_level = trained_transition(method, cur_level, mtd, THREAD);
1479 if (cur_level == next_level) {
          // trained_transition() is going to return the same level if no startup/warmup optimizations apply.
1481 // In order to catch possible pathologies due to behavior change we feed the event to the regular
1482 // TF but with profiling delay.
1483 next_level = standard_transition<Predicate>(method, cur_level, true /*delay_profiling*/, disable_feedback);
1484 }
1485 }
1486 } else if (is_trivial(method) || method->is_native()) {
1487 next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_full_optimization : CompLevel_simple;
1488 } else {
1489 next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
1490 }
1491 }
1492 return (next_level != cur_level) ? limit_level(next_level) : next_level;
1493 }
1494
1495
1496 template<typename Predicate>
1497 CompLevel CompilationPolicy::standard_transition(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
1498 CompLevel next_level = cur_level;
1499 switch(cur_level) {
1500 default: break;
1501 case CompLevel_none:
1502 next_level = transition_from_none<Predicate>(method, cur_level, delay_profiling, disable_feedback);
1503 break;
1504 case CompLevel_limited_profile:
1505 next_level = transition_from_limited_profile<Predicate>(method, cur_level, delay_profiling, disable_feedback);
1506 break;
1507 case CompLevel_full_profile:
1508 next_level = transition_from_full_profile<Predicate>(method, cur_level);
1509 break;
1510 }
1511 return next_level;
1512 }
1513
1514 template<typename Predicate>
1515 CompLevel CompilationPolicy::transition_from_none(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
1516 precond(cur_level == CompLevel_none);
1517 CompLevel next_level = cur_level;
1518 int i = method->invocation_count();
1519 int b = method->backedge_count();
1520 double scale = delay_profiling ? Tier0ProfileDelayFactor : 1.0;
1521 // If we were at full profile level, would we switch to full opt?
1522 if (transition_from_full_profile<Predicate>(method, CompLevel_full_profile) == CompLevel_full_optimization) {
1523 next_level = CompLevel_full_optimization;
1524 } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply_scaled(method, cur_level, i, b, scale)) {
1525 // C1-generated fully profiled code is about 30% slower than the limited profile
1526 // code that has only invocation and backedge counters. The observation is that
1527 // if C2 queue is large enough we can spend too much time in the fully profiled code
1528 // while waiting for C2 to pick the method from the queue. To alleviate this problem
1529 // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
1530 // we choose to compile a limited profiled version and then recompile with full profiling
1531 // when the load on C2 goes down.
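    // Illustrative: with Tier3DelayOn = 5 and two C2 compiler threads, a C2
    // queue holding more than 10 tasks diverts this method to level 2 for now.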
1532 if (delay_profiling || (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > Tier3DelayOn * compiler_count(CompLevel_full_optimization))) {
1533 next_level = CompLevel_limited_profile;
1534 } else {
1535 next_level = CompLevel_full_profile;
1536 }
1537 }
1538 return next_level;
1539 }
1540
1541 template<typename Predicate>
1542 CompLevel CompilationPolicy::transition_from_full_profile(const methodHandle& method, CompLevel cur_level) {
1543 precond(cur_level == CompLevel_full_profile);
1544 CompLevel next_level = cur_level;
1545 MethodData* mdo = method->method_data();
1546 if (mdo != nullptr) {
1547 if (mdo->would_profile() || CompilationModeFlag::disable_intermediate()) {
1548 int mdo_i = mdo->invocation_count_delta();
1549 int mdo_b = mdo->backedge_count_delta();
1550 if (Predicate::apply(method, cur_level, mdo_i, mdo_b)) {
1551 next_level = CompLevel_full_optimization;
1552 }
1553 } else {
1554 next_level = CompLevel_full_optimization;
1555 }
1556 }
1557 return next_level;
1558 }
1559
1560 template<typename Predicate>
1561 CompLevel CompilationPolicy::transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
1562 precond(cur_level == CompLevel_limited_profile);
1563 CompLevel next_level = cur_level;
1564 int i = method->invocation_count();
1565 int b = method->backedge_count();
1566 double scale = delay_profiling ? Tier2ProfileDelayFactor : 1.0;
1567 MethodData* mdo = method->method_data();
1568 if (mdo != nullptr) {
1569 if (mdo->would_profile()) {
1570 if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
1571 Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
1572 Predicate::apply_scaled(method, cur_level, i, b, scale))) {
1573 next_level = CompLevel_full_profile;
1574 }
1575 } else {
1576 next_level = CompLevel_full_optimization;
1577 }
1578 } else {
1579 // If there is no MDO we need to profile
1580 if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
1581 Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
1582 Predicate::apply_scaled(method, cur_level, i, b, scale))) {
1583 next_level = CompLevel_full_profile;
1584 }
1585 }
1586 if (next_level == CompLevel_full_profile && is_method_profiled(method)) {
1587 next_level = CompLevel_full_optimization;
1588 }
1589 return next_level;
1590 }
1591
1592
1593 // Determine if a method should be compiled with a normal entry point at a different level.
1594 CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
1595 CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<LoopPredicate>(method, cur_level, THREAD, true));
1596 CompLevel next_level = common<CallPredicate>(method, cur_level, THREAD, !TrainingData::have_data() && is_old(method));
1597
1598 // If OSR method level is greater than the regular method level, the levels should be
1599 // equalized by raising the regular method level in order to avoid OSRs during each
1600 // invocation of the method.
1601 if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
1602 MethodData* mdo = method->method_data();
1603 guarantee(mdo != nullptr, "MDO should not be nullptr");
1604 if (mdo->invocation_count() >= 1) {
1605 next_level = CompLevel_full_optimization;
1606 }
1607 } else {
1608 next_level = MAX2(osr_level, next_level);
1609 }
1610 #if INCLUDE_JVMCI
1611 if (EnableJVMCI && UseJVMCICompiler &&
1612 next_level == CompLevel_full_optimization && !AOTLinkedClassBulkLoader::class_preloading_finished()) {
1613 next_level = cur_level;
1614 }
1615 #endif
1616 return next_level;
1617 }
1618
1619 // Determine if we should do an OSR compilation of a given method.
1620 CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
1621 CompLevel next_level = common<LoopPredicate>(method, cur_level, THREAD, true);
1622 if (cur_level == CompLevel_none) {
    // If there is a live OSR method, that means we deopted to the interpreter
    // for the transition.
1625 CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
1626 if (osr_level > CompLevel_none) {
1627 return osr_level;
1628 }
1629 }
1630 return next_level;
1631 }
1632
1633 // Handle the invocation event.
1634 void CompilationPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
1635 CompLevel level, nmethod* nm, TRAPS) {
1636 if (should_create_mdo(mh, level)) {
1637 create_mdo(mh, THREAD);
1638 }
1639 CompLevel next_level = call_event(mh, level, THREAD);
1640 if (next_level != level) {
1641 if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
|