 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotLinkedClassBulkLoader.hpp"
#include "code/aotCodeCache.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/recompilationPolicy.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "oops/trainingData.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointVerifiers.hpp"
#ifdef COMPILER1
#include "c1/c1_Compiler.hpp"
#endif
#ifdef COMPILER2
#include "opto/c2compiler.hpp"
#endif
#if INCLUDE_JVMCI
#include "jvmci/jvmci.hpp"
#endif

int64_t CompilationPolicy::_start_time = 0;
int CompilationPolicy::_c1_count = 0;
int CompilationPolicy::_c2_count = 0;
int CompilationPolicy::_c3_count = 0;
int CompilationPolicy::_ac_count = 0;
double CompilationPolicy::_increase_threshold_at_ratio = 0;

CompilationPolicy::TrainingReplayQueue CompilationPolicy::_training_replay_queue;

void compilationPolicy_init() {
  CompilationPolicy::initialize();
}

int CompilationPolicy::compiler_count(CompLevel comp_level) {
  if (is_c1_compile(comp_level)) {
    return c1_count();
  } else if (is_c2_compile(comp_level)) {
    return c2_count();
  }
  return 0;
}

// Returns true if m must be compiled before executing it.
// This is intended to force compiles for methods (usually for
// debugging) that would otherwise be interpreted for some reason.
bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level) {
  // Don't allow -Xcomp to cause compiles in replay mode
  if (ReplayCompiles) return false;

  if (m->has_compiled_code()) return false;       // already compiled
  if (!can_be_compiled(m, comp_level)) return false;

  return !UseInterpreter ||                                              // must compile all methods
         (AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
}
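// An illustrative sketch of when must_be_compiled() matters: running with
// -Xcomp turns UseInterpreter off, so every method that passes
// can_be_compiled() must be compiled before its first execution; adding
// -XX:+AlwaysCompileLoopMethods also forces eager compilation of any
// method that contains a loop.
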
void CompilationPolicy::maybe_compile_early(const methodHandle& m, TRAPS) {
  if (m->method_holder()->is_not_initialized()) {
    // 'is_not_initialized' means not only '!is_initialized', but also that
    // initialization has not been started yet ('!being_initialized').
    // Do not force compilation of methods in uninitialized classes.
    return;
  }
  if (!m->is_native() && MethodTrainingData::have_data()) {
    MethodTrainingData* mtd = MethodTrainingData::find_fast(m);
    if (mtd == nullptr) {
      return;   // there is no training data recorded for m
    }
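    // A method whose current code still carries clinit barriers was compiled
    // before its holder finished initializing (e.g. ahead of time);
    // recompiling it now, even at the same level, presumably lets it shed
    // those barriers.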
    bool recompile = m->code_has_clinit_barriers();
    CompLevel cur_level = static_cast<CompLevel>(m->highest_comp_level());
    CompLevel next_level = trained_transition(m, cur_level, mtd, THREAD);
    if ((next_level != cur_level || recompile) && can_be_compiled(m, next_level) && !CompileBroker::compilation_is_in_queue(m)) {
      bool requires_online_compilation = false;
      CompileTrainingData* ctd = mtd->last_toplevel_compile(next_level);
      if (ctd != nullptr) {
        requires_online_compilation = (ctd->init_deps_left() > 0);
      }
      if (requires_online_compilation && recompile) {
        return;
      }
      if (PrintTieredEvents) {
        print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, next_level);
      }
      CompileBroker::compile_method(m, InvocationEntryBci, next_level, 0, requires_online_compilation, CompileTask::Reason_MustBeCompiled, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        CLEAR_PENDING_EXCEPTION;
      }
    }
  }
}

void CompilationPolicy::maybe_compile_early_after_init(const methodHandle& m, TRAPS) {
  assert(m->method_holder()->is_initialized(), "Should be called after class initialization");
  maybe_compile_early(m, THREAD);
}

void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
  if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
    // don't force compilation, resolve was on behalf of compiler
    return;
  }
  if (m->method_holder()->is_not_initialized()) {
    // 'is_not_initialized' means not only '!is_initialized', but also that
    // initialization has not been started yet ('!being_initialized').
    // Do not force compilation of methods in uninitialized classes;
    // forcing it here would trip an assert later, in
    // CompileBroker::compile_method.
    // We sometimes use the link resolver to do reflective lookups
    // even before classes are initialized.
    return;
  }

  if (must_be_compiled(m)) {
    // This path is unusual, mostly used by the '-Xcomp' stress test mode.
    CompLevel level = initial_compile_level(m);
    if (PrintTieredEvents) {
      print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, level);
    }
    CompileBroker::compile_method(m, InvocationEntryBci, level, 0, false, CompileTask::Reason_MustBeCompiled, THREAD);
  }
}

void CompilationPolicy::replay_training_at_init_impl(InstanceKlass* klass, TRAPS) {
  if (!klass->has_init_deps_processed()) {
    ResourceMark rm;
    log_debug(training)("Replay training: %s", klass->external_name());

    KlassTrainingData* ktd = KlassTrainingData::find(klass);
    if (ktd != nullptr) {
      guarantee(ktd->has_holder(), "");
      ktd->notice_fully_initialized(); // sets klass->has_init_deps_processed bit
      assert(klass->has_init_deps_processed(), "");

      if (AOTCompileEagerly) {
        ktd->iterate_comp_deps([&](CompileTrainingData* ctd) {
          if (ctd->init_deps_left() == 0) {
            MethodTrainingData* mtd = ctd->method();
            if (mtd->has_holder()) {
              const methodHandle mh(THREAD, const_cast<Method*>(mtd->holder()));
              CompilationPolicy::maybe_compile_early(mh, THREAD);
            }
          }
        });
      }
    }
  }
}

void CompilationPolicy::flush_replay_training_at_init(TRAPS) {
  MonitorLocker locker(THREAD, TrainingReplayQueue_lock);
  while (!_training_replay_queue.is_empty_unlocked()) {
    locker.wait(); // let the replay training thread drain the queue
  }
}

void CompilationPolicy::replay_training_at_init(InstanceKlass* klass, TRAPS) {
  assert(klass->is_initialized(), "");
  if (TrainingData::have_data() && klass->is_shared()) {
    _training_replay_queue.push(klass, TrainingReplayQueue_lock, THREAD);
  }
}
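
// Class initialization completes on arbitrary Java threads, so
// replay_training_at_init() only enqueues the class; the dedicated
// training-replay thread running replay_training_at_init_loop() (below) pops
// entries and does the actual work in replay_training_at_init_impl(), while
// flush_replay_training_at_init() lets a caller block until the queue drains.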

// For TrainingReplayQueue
template<>
void CompilationPolicyUtils::Queue<InstanceKlass>::print_on(outputStream* st) {
  int pos = 0;
  for (QueueNode* cur = _head; cur != nullptr; cur = cur->next()) {
    ResourceMark rm;
    InstanceKlass* ik = cur->value();
    st->print_cr("%3d: " INTPTR_FORMAT " %s", ++pos, p2i(ik), ik->external_name());
  }
}

void CompilationPolicy::replay_training_at_init_loop(TRAPS) {
  while (!CompileBroker::is_compilation_disabled_forever() || AOTVerifyTrainingData) {
    InstanceKlass* ik = _training_replay_queue.pop(TrainingReplayQueue_lock, THREAD);
    if (ik != nullptr) {
      replay_training_at_init_impl(ik, THREAD);
    }
  }
}

static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_level) {
  if (comp_level == CompLevel_any) {
    if (CompilerConfig::is_c1_only()) {
      comp_level = CompLevel_simple;
    } else if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
      comp_level = CompLevel_full_optimization;
    }
  }
  return comp_level;
}

// Returns true if m is allowed to be compiled
bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
// ... [lines 232-292 of the original file elided] ...
#endif
  return compile_queue->first();
}

// Simple methods do just as well compiled with C1 as with C2.
// Determine if a given method is such a case.
bool CompilationPolicy::is_trivial(const methodHandle& method) {
  if (method->is_accessor() ||
      method->is_constant_getter()) {
    return true;
  }
  return false;
}

bool CompilationPolicy::force_comp_at_level_simple(const methodHandle& method) {
  if (CompilationModeFlag::quick_internal()) {
#if INCLUDE_JVMCI
    if (UseJVMCICompiler) {
      AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
      if (comp != nullptr && comp->is_jvmci() && ((JVMCICompiler*) comp)->force_comp_at_level_simple(method)) {
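        // Honor JVMCI's request to pin the method at CompLevel_simple only
        // when no C3 compiler is configured; with an AOT code cache running
        // C3, the method can presumably go through the normal tiers instead.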
        return !AOTCodeCache::is_C3_on();
      }
    }
#endif
  }
  return false;
}

CompLevel CompilationPolicy::comp_level(Method* method) {
  nmethod* nm = method->code();
  if (nm != nullptr && nm->is_in_use()) {
    return (CompLevel)nm->comp_level();
  }
  return CompLevel_none;
}

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how
// many methods per compiler thread can be in the queue before
// ... [lines 334-480 of the original file elided] ...

// Print an event.
void CompilationPolicy::print_event(EventType type, Method* m, Method* im, int bci, CompLevel level) {
  bool inlinee_event = m != im;

  ttyLocker tty_lock;
  tty->print("%lf: [", os::elapsedTime());

  switch(type) {
  case CALL:
    tty->print("call");
    break;
  case LOOP:
    tty->print("loop");
    break;
  case COMPILE:
    tty->print("compile");
    break;
  case FORCE_COMPILE:
    tty->print("force-compile");
    break;
  case FORCE_RECOMPILE:
    tty->print("force-recompile");
    break;
  case REMOVE_FROM_QUEUE:
    tty->print("remove-from-queue");
    break;
  case UPDATE_IN_QUEUE:
    tty->print("update-in-queue");
    break;
  case REPROFILE:
    tty->print("reprofile");
    break;
  case MAKE_NOT_ENTRANT:
    tty->print("make-not-entrant");
    break;
  default:
    tty->print("unknown");
  }

  tty->print(" level=%d ", level);

  ResourceMark rm;
  char *method_name = m->name_and_sig_as_C_string();
  tty->print("[%s", method_name);
  if (inlinee_event) {
    char *inlinee_name = im->name_and_sig_as_C_string();
    tty->print(" [%s]] ", inlinee_name);
  }
  else tty->print("] ");
  tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
             CompileBroker::queue_size(CompLevel_full_optimization));

  tty->print(" rate=");
  if (m->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", m->rate());

  RecompilationPolicy::print_load_average();

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
             threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));

  if (type != COMPILE) {
    print_counters("", m);
    if (inlinee_event) {
      print_counters("inlinee ", im);
    }
    tty->print(" compilable=");
    bool need_comma = false;
    if (!m->is_not_compilable(CompLevel_full_profile)) {
      tty->print("c1");
      need_comma = true;
    }
    if (!m->is_not_osr_compilable(CompLevel_full_profile)) {
      if (need_comma) tty->print(",");
      tty->print("c1-osr");
      need_comma = true;
    }
    if (!m->is_not_compilable(CompLevel_full_optimization)) {
// ... [lines 559-566 of the original file elided] ...
    tty->print(" status=");
    if (m->queued_for_compilation()) {
      tty->print("in-queue");
    } else tty->print("idle");
    print_training_data("", m);
    if (inlinee_event) {
      print_training_data("inlinee ", im);
    }
  }
  tty->print_cr("]");
}

void CompilationPolicy::initialize() {
  if (!CompilerConfig::is_interpreter_only()) {
    int count = CICompilerCount;
    bool c1_only = CompilerConfig::is_c1_only();
    bool c2_only = CompilerConfig::is_c2_or_jvmci_compiler_only();

#ifdef _LP64
    // Turn on ergonomic compiler count selection
    if (AOTCodeCache::maybe_dumping_code()) {
      // The assembly phase runs C1 and C2 compilation in separate phases,
      // and can use all the CPU threads it can reach. Adjust the common
      // options before the policy starts overwriting them.
      if (FLAG_IS_DEFAULT(UseDynamicNumberOfCompilerThreads)) {
        FLAG_SET_ERGO(UseDynamicNumberOfCompilerThreads, false);
      }
      if (FLAG_IS_DEFAULT(CICompilerCountPerCPU)) {
        FLAG_SET_ERGO(CICompilerCountPerCPU, false);
      }
      if (FLAG_IS_DEFAULT(CICompilerCount)) {
        count = MAX2(count, os::active_processor_count());
      }
    }
    if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
      FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
    }
    if (CICompilerCountPerCPU) {
      // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
      int log_cpu = log2i(os::active_processor_count());
      int loglog_cpu = log2i(MAX2(log_cpu, 1));
      count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2);
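      // For example, with 16 hardware threads: log_cpu = log2i(16) = 4,
      // loglog_cpu = log2i(4) = 2, giving count = MAX2(4 * 2 * 3 / 2, 2) = 12;
      // with 8 threads: MAX2(3 * 1 * 3 / 2, 2) = MAX2(4, 2) = 4.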
    }
    {
      // Make sure there is enough space in the code cache to hold all the compiler buffers
      size_t c1_size = 0;
#ifdef COMPILER1
      c1_size = Compiler::code_buffer_size();
#endif
      size_t c2_size = 0;
#ifdef COMPILER2
      c2_size = C2Compiler::initial_code_buffer_size();
#endif
      size_t buffer_size = c1_only ? c1_size : (c1_size/3 + 2*c2_size/3);
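      // The 1/3 : 2/3 weighting mirrors the default thread split below (about
      // one third C1 threads, the rest C2), so buffer_size approximates the
      // average compiler-buffer footprint per compiler thread.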
      int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
      if (count > max_count) {
        // Lower the compiler count such that all buffers fit into the code cache
        count = MAX2(max_count, c1_only ? 1 : 2);
      }
      FLAG_SET_ERGO(CICompilerCount, count);
    }
#else
    // On 32-bit systems, the number of compiler threads is limited to 3.
    // On these systems, the virtual address space available to the JVM
// ... [lines 631-633 of the original file elided] ...
    // available cores can result in the exhaustion of the address space
    // available to the VM and thus cause the VM to crash.
    if (FLAG_IS_DEFAULT(CICompilerCount)) {
      count = 3;
      FLAG_SET_ERGO(CICompilerCount, count);
    }
#endif

    if (c1_only) {
      // No C2 compiler thread required
      set_c1_count(count);
    } else if (c2_only) {
      set_c2_count(count);
    } else {
#if INCLUDE_JVMCI
      if (UseJVMCICompiler && UseJVMCINativeLibrary) {
        int libjvmci_count = MAX2((int) (count * JVMCINativeLibraryThreadFraction), 1);
        int c1_count = MAX2(count - libjvmci_count, 1);
        set_c2_count(libjvmci_count);
        set_c1_count(c1_count);
      } else if (AOTCodeCache::is_C3_on()) {
        set_c1_count(MAX2(count / 3, 1));
        set_c2_count(MAX2(count - c1_count(), 1));
        set_c3_count(1);
      } else
#endif
      {
        set_c1_count(MAX2(count / 3, 1));
        set_c2_count(MAX2(count - c1_count(), 1));
      }
    }
    if (AOTCodeCache::is_code_load_thread_on()) {
      set_ac_count((c1_only || c2_only) ? 1 : 2); // At minimum we need 2 threads to load C1 and C2 cached code in parallel
    }
    assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
    set_increase_threshold_at_ratio();
  }

  set_start_time(nanos_to_millis(os::javaTimeNanos()));
}


#ifdef ASSERT
bool CompilationPolicy::verify_level(CompLevel level) {
  if (TieredCompilation && level > TieredStopAtLevel) {
    return false;
  }
  // Check if there is a compiler to process the requested level
  if (!CompilerConfig::is_c1_enabled() && is_c1_compile(level)) {
    return false;
  }
  if (!CompilerConfig::is_c2_or_jvmci_compiler_enabled() && is_c2_compile(level)) {
    return false;
  }

  // Interpreter level is always valid.
  if (level == CompLevel_none) {
    return true;
// ... [lines 692-787 of the original file elided] ...
  }
}

// Called with the queue locked and with at least one element
CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThread* THREAD) {
  CompileTask* max_blocking_task = nullptr;
  CompileTask* max_task = nullptr;
  Method* max_method = nullptr;

  int64_t t = nanos_to_millis(os::javaTimeNanos());
  // Iterate through the queue and find a method with the maximum rate.
  for (CompileTask* task = compile_queue->first(); task != nullptr;) {
    CompileTask* next_task = task->next();
    // If a method was unloaded or has been stale for some time, remove it from the queue.
    // Blocking tasks and tasks submitted from the whitebox API don't become stale.
    if (task->is_unloaded()) {
      compile_queue->remove_and_mark_stale(task);
      task = next_task;
      continue;
    }
    if (task->is_aot()) {
      // AOTCodeCache tasks are on a separate queue, and they should load fast. There is no need to walk
      // the rest of the queue, just take the task and go.
      return task;
    }
    if (task->is_blocking() && task->compile_reason() == CompileTask::Reason_Whitebox) {
      // CTW tasks, submitted as blocking Whitebox requests, do not participate in rate
      // selection and/or any level adjustments. Just return them in order.
      return task;
    }
    Method* method = task->method();
    methodHandle mh(THREAD, method);
    if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, mh) && !is_old(mh)) {
      if (PrintTieredEvents) {
        print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
      }
      method->clear_queued_for_compilation();
      method->set_pending_queue_processed(false);
      compile_queue->remove_and_mark_stale(task);
      task = next_task;
      continue;
    }
    update_rate(t, mh);
    if (max_task == nullptr || compare_methods(method, max_method) || compare_tasks(task, max_task)) {
      // Select a method with the highest rate
      max_task = task;
      max_method = method;
    }

    if (task->is_blocking()) {
      if (max_blocking_task == nullptr || compare_methods(method, max_blocking_task->method())) {
        max_blocking_task = task;
      }
    }

    task = next_task;
  }

  if (max_blocking_task != nullptr) {
    // In blocking compilation mode, the CompileBroker will make
    // compilations submitted by a JVMCI compiler thread non-blocking. These
    // compilations should be scheduled after all blocking compilations
    // to service non-compiler related compilations sooner and reduce the
    // chance of such compilations timing out.
    max_task = max_blocking_task;
    max_method = max_task->method();
  }

  methodHandle max_method_h(THREAD, max_method);

  if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
      max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
    max_task->set_comp_level(CompLevel_limited_profile);

    if (CompileBroker::compilation_is_complete(max_method_h(), max_task->osr_bci(), CompLevel_limited_profile,
                                               false /* requires_online_compilation */,
                                               CompileTask::Reason_None)) {
      if (PrintTieredEvents) {
        print_event(REMOVE_FROM_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
      }
      compile_queue->remove_and_mark_stale(max_task);
      max_method->clear_queued_for_compilation();
      return nullptr;
    }

    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}

void CompilationPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) {
    if (PrintTieredEvents) {
      print_event(REPROFILE, sd->method(), sd->method(), InvocationEntryBci, CompLevel_none);
    }
    MethodData* mdo = sd->method()->method_data();
    if (mdo != nullptr) {
      mdo->reset_start_counters();
    }
    if (sd->is_top()) break;
  }
}
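
// reprofile() walks the inlining scopes from the trap point outward and resets
// each MDO's start counters, so the invocation/backedge deltas consulted by
// is_method_profiled() start accumulating again from zero.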

nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle& inlinee,
                                  int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS) {
  if (PrintTieredEvents) {
    print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
// ... [lines 898-981 of the original file elided] ...
      if (level == CompLevel_full_optimization && can_be_osr_compiled(mh, CompLevel_simple)) {
        nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
        if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) {
          // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
          osr_nm->make_not_entrant("OSR invalidation for compiling with C1");
        }
        compile(mh, bci, CompLevel_simple, THREAD);
      }
      return;
    }
  }
  if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
    return;
  }
  if (!CompileBroker::compilation_is_in_queue(mh)) {
    if (PrintTieredEvents) {
      print_event(COMPILE, mh(), mh(), bci, level);
    }
    int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
    update_rate(nanos_to_millis(os::javaTimeNanos()), mh);
    bool requires_online_compilation = false;
    if (TrainingData::have_data()) {
      MethodTrainingData* mtd = MethodTrainingData::find_fast(mh);
      if (mtd != nullptr) {
        CompileTrainingData* ctd = mtd->last_toplevel_compile(level);
        if (ctd != nullptr) {
          requires_online_compilation = (ctd->init_deps_left() > 0);
        }
      }
    }
    CompileBroker::compile_method(mh, bci, level, hot_count, requires_online_compilation, CompileTask::Reason_Tiered, THREAD);
  }
}
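
// requires_online_compilation, derived from the training data's unresolved
// init_deps_left() count above, presumably directs the broker to produce a
// fresh JIT compilation instead of installing archived AOT code whose
// initialization dependencies are not yet satisfied.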

// update_rate() is called from select_task() while holding a compile queue lock.
void CompilationPolicy::update_rate(int64_t t, const methodHandle& method) {
  // Skip update if counters are absent.
  // Can't allocate them since we are holding compile queue lock.
  if (method->method_counters() == nullptr) return;

  if (is_old(method)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    method->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since last safepoint in milliseconds.
  int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
  int64_t delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement
// ... [lines 1033-1074 of the original file elided] ...
}

double CompilationPolicy::weight(Method* method) {
  return (double)(method->rate() + 1) * (method->invocation_count() + 1) * (method->backedge_count() + 1);
}
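
// The +1 terms keep a zero factor from collapsing the whole product. As a
// purely illustrative example: a method with rate 50, 1000 invocations and
// 200 backedges weighs (50+1) * (1000+1) * (200+1) = 10,261,251, so it
// outranks a method with the same counts but a lower event rate.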

// Apply heuristics and return true if x should be compiled before y
bool CompilationPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

bool CompilationPolicy::compare_tasks(CompileTask* x, CompileTask* y) {
  assert(!x->is_aot() && !y->is_aot(), "AOT code caching tasks are not expected here");
  if (x->compile_reason() != y->compile_reason() && y->compile_reason() == CompileTask::Reason_MustBeCompiled) {
    return true;
  }
  return false;
}

// Is method profiled enough?
bool CompilationPolicy::is_method_profiled(const methodHandle& method) {
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return CallPredicate::apply_scaled(method, CompLevel_full_profile, i, b, 1);
  }
  return false;
}
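
// The deltas above count invocations and backedges since the MDO's start
// counters were last reset (see reprofile()), and the scale factor 1 applies
// the unscaled CompLevel_full_profile thresholds to that recent activity.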

// Determine if a method is mature.
bool CompilationPolicy::is_mature(MethodData* mdo) {
  if (Arguments::is_compiler_only()) {
    // Always report profiles as immature with -Xcomp
    return false;
  }
  if (mdo != nullptr) {
    methodHandle mh(Thread::current(), mdo->method());
// ... [lines 1123-1128 of the original file elided] ...
}

// If a method is old enough and is still in the interpreter we want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on compilers into account.
bool CompilationPolicy::should_create_mdo(const methodHandle& method, CompLevel cur_level) {
  if (cur_level != CompLevel_none || force_comp_at_level_simple(method) || CompilationModeFlag::quick_only() || !ProfileInterpreter) {
    return false;
  }

  if (TrainingData::have_data()) {
    MethodTrainingData* mtd = MethodTrainingData::find_fast(method);
    if (mtd != nullptr && mtd->saw_level(CompLevel_full_optimization)) {
      return true;
    }
  }

  if (is_old(method)) {
    return true;
  }
  int i = method->invocation_count();
  int b = method->backedge_count();
  double k = Tier0ProfilingStartPercentage / 100.0;

  // If the top level compiler is not keeping up, delay profiling.
  if (CompileBroker::queue_size(CompLevel_full_optimization) <= Tier0Delay * compiler_count(CompLevel_full_optimization)) {
    return CallPredicate::apply_scaled(method, CompLevel_none, i, b, k) || LoopPredicate::apply_scaled(method, CompLevel_none, i, b, k);
  }
  return false;
}

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool CompilationPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}