/*
 * Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_COMPILER_COMPILATIONPOLICY_HPP
#define SHARE_COMPILER_COMPILATIONPOLICY_HPP

#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "oops/methodData.hpp"
#include "oops/trainingData.hpp"
#include "utilities/globalDefinitions.hpp"

namespace CompilationPolicyUtils {

// Fixed-size ring buffer of non-negative integer samples plus their
// timestamps (milliseconds). value() computes an average in which each
// sample's contribution is discounted by the time elapsed since it was
// recorded, so older samples weigh less.
template<int SAMPLE_COUNT = 256>
class WeightedMovingAverage {
  int _current;                      // ring-buffer cursor: next slot to overwrite
  int _samples[SAMPLE_COUNT];        // -1 marks a slot that was never written (samples must be >= 0)
  int64_t _timestamps[SAMPLE_COUNT]; // time (ms) at which the corresponding sample was taken

  // Record sample s taken at time t (ms), overwriting the oldest slot.
  void sample(int s, int64_t t) {
    assert(s >= 0, "Negative sample values are not supported");
    _samples[_current] = s;
    _timestamps[_current] = t;
    if (++_current >= SAMPLE_COUNT) {
      _current = 0;
    }
  }

  // Since sampling happens at irregular intervals the solution is to
  // discount the older samples proportionally to the time between
  // now and the time of the sample.
  double value(int64_t t) const {
    double decay_speed = 1; // decay tuning factor; currently neutral (1)
    double weighted_sum = 0;
    int count = 0;
    for (int i = 0; i < SAMPLE_COUNT; i++) {
      if (_samples[i] >= 0) { // skip slots that were never written
        count++;
        double delta_t = (t - _timestamps[i]) / 1000.0; // in seconds
        if (delta_t < 1) delta_t = 1; // clamp so fresh samples aren't over-weighted
        weighted_sum += (double) _samples[i] / (delta_t * decay_speed);
      }
    }
    if (count > 0) {
      return weighted_sum / count;
    } else {
      return 0; // no samples recorded yet
    }
  }
  // Current time in milliseconds (monotonic JVM time base).
  static int64_t time() {
    return nanos_to_millis(os::javaTimeNanos());
  }
public:
  WeightedMovingAverage() : _current(0) {
    // Mark all slots unused. _timestamps needs no initialization because
    // value() only reads entries whose sample is >= 0.
    for (int i = 0; i < SAMPLE_COUNT; i++) {
      _samples[i] = -1;
    }
  }
  void sample(int s) { sample(s, time()); }
  double value() const { return value(time()); }
};

// Minimal singly-linked FIFO of T* with blocking and non-blocking pop.
// Synchronization is external: each operation takes the Monitor that guards
// the queue. Nodes are allocated on the C heap (mtCompiler).
template<typename T>
class Queue {
  class QueueNode : public CHeapObj<mtCompiler> {
    T* _value;
    QueueNode* _next;
  public:
    QueueNode(T* value, QueueNode* next) : _value(value), _next(next) { }
    T* value() const { return _value; }
    void set_next(QueueNode* next) { _next = next; }
    QueueNode* next() const { return _next; }
  };

  QueueNode* _head; // oldest element; nullptr when empty
  QueueNode* _tail; // newest element; nullptr when empty

  // Append value at the tail. Caller must hold the queue's lock.
  void push_unlocked(T* value) {
    QueueNode* n = new QueueNode(value, nullptr);
    if (_tail != nullptr) {
      _tail->set_next(n);
    }
    _tail = n;
    if (_head == nullptr) {
      _head = _tail;
    }
  }
  // Detach and return the head value (nullptr if empty), freeing its node.
  // Caller must hold the queue's lock.
  T* pop_unlocked() {
    QueueNode* n = _head;
    if (_head != nullptr) {
      _head = _head->next();
    }
    if (_head == nullptr) {
      _tail = _head;
    }
    T* value = nullptr;
    if (n != nullptr) {
      value = n->value();
      delete n;
    }
    return value;
  }
public:
  // NOTE(review): no destructor — any nodes still enqueued are leaked if a
  // Queue is destroyed; presumably instances live for the VM's lifetime.
  Queue() : _head(nullptr), _tail(nullptr) { }
  // Append value and wake all threads blocked in pop().
  void push(T* value, Monitor* lock, TRAPS) {
    MonitorLocker locker(THREAD, lock);
    push_unlocked(value);
    locker.notify_all();
  }

  // Racy unless the caller holds the queue's lock.
  bool is_empty_unlocked() const { return _head == nullptr; }

  // Block until an element is available or compilation has been disabled
  // forever; returns nullptr in the latter case.
  T* pop(Monitor* lock, TRAPS) {
    MonitorLocker locker(THREAD, lock);
    while(is_empty_unlocked() && !CompileBroker::is_compilation_disabled_forever()) {
      locker.notify_all(); // notify that queue is empty
      locker.wait();
    }
    T* value = pop_unlocked();
    return value;
  }

  // Non-blocking variant of pop(): returns nullptr if the queue is empty.
  T* try_pop(Monitor* lock, TRAPS) {
    MonitorLocker locker(THREAD, lock);
    T* value = nullptr;
    if (!is_empty_unlocked()) {
      value = pop_unlocked();
    }
    return value;
  }

  void print_on(outputStream* st);
};
} // namespace CompilationPolicyUtils

class CompileTask;
class CompileQueue;
/*
 * The system supports 5 execution levels:
 * * level 0 - interpreter (Profiling is tracked by a MethodData object, or MDO in short)
 * * level 1 - C1 with full optimization (no profiling)
 * * level 2 - C1 with invocation and backedge counters
 * * level 3 - C1 with full profiling (level 2 + all other MDO profiling information)
 * * level 4 - C2 with full profile guided optimization
 *
 * The MethodData object is created by either the interpreter or the compilers to store any
 * profiling information collected on a method (ciMethod::ensure_method_data() for C1 and C2
 * and CompilationPolicy::create_mdo() for the interpreter). Both the interpreter and code
 * compiled by C1 at level 3 will constantly update profiling information in the MDO during
 * execution. The information in the MDO is then used by C1 and C2 during compilation, via
 * the compiler interface (ciMethodXXX).
 * See ciMethod.cpp and ciMethodData.cpp for information transfer from an MDO to the compilers
 * through the compiler interface.
 *
 * Levels 0, 2 and 3 periodically notify the runtime about the current value of the counters
 * (invocation counters and backedge counters). The frequency of these notifications is
 * different at each level. These notifications are used by the policy to decide what transition
 * to make.
 *
 * Execution starts at level 0 (interpreter), then the policy can decide either to compile the
 * method at level 3 or level 2. The decision is based on the following factors:
 * 1. The length of the C2 queue determines the next level. The observation is that level 2
 * is generally faster than level 3 by about 30%, therefore we would want to minimize the time
 * a method spends at level 3. We should only spend the time at level 3 that is necessary to get
 * adequate profiling. So, if the C2 queue is long enough it is more beneficial to go first to
 * level 2, because if we transitioned to level 3 we would be stuck there until our C2 compile
 * request makes its way through the long queue. When the load on C2 recedes we are going to
 * recompile at level 3 and start gathering profiling information.
 * 2. The length of the C1 queue is used to dynamically adjust the thresholds, so as to introduce
 * additional filtering if the compiler is overloaded. The rationale is that by the time a
 * method gets compiled it can become unused, so it doesn't make sense to put too much onto the
 * queue.
 *
 * After profiling is completed at level 3 the transition is made to level 4. Again, the length
 * of the C2 queue is used as feedback to adjust the thresholds.
 *
 * After the first C1 compile some basic information is determined about the code like the number
 * of the blocks and the number of the loops. Based on that it can be decided that a method
 * is trivial and compiling it with C1 will yield the same code. In this case the method is
 * compiled at level 1 instead of 4.
 *
 * We also support profiling at level 0. If C1 is slow enough to produce the level 3 version of
 * the code and the C2 queue is sufficiently small we can decide to start profiling in the
 * interpreter (and continue profiling in the compiled code once the level 3 version arrives).
 * If the profiling at level 0 is fully completed before the level 3 version is produced, a level 2
 * version is compiled instead in order to run faster while waiting for a level 4 version.
 *
 * Compile queues are implemented as priority queues - for each method in the queue we compute
 * the event rate (the number of invocation and backedge counter increments per unit of time).
 * When getting an element off the queue we pick the one with the largest rate. Maintaining the
 * rate also allows us to remove stale methods (the ones that got on the queue but stopped
 * being used shortly after that).
 */

/* Command line options:
 * - Tier?InvokeNotifyFreqLog and Tier?BackedgeNotifyFreqLog control the frequency of method
 * invocation and backedge notifications. Basically every n-th invocation or backedge a mutator thread
 * makes a call into the runtime.
 *
 * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
 * compilation thresholds.
 * Level 2 thresholds are not used and are provided for option-compatibility and potential future use.
 * Other thresholds work as follows:
 *
 * Transition from interpreter (level 0) to C1 with full profiling (level 3) happens when
 * the following predicate is true (X is the level):
 *
 * i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s),
 *
 * where $i$ is the number of method invocations, $b$ number of backedges and $s$ is the scaling
 * coefficient that will be discussed further.
 * The intuition is to equalize the time that is spent profiling each method.
 * The same predicate is used to control the transition from level 3 to level 4 (C2). It should be
 * noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come
 * from Method* and for the 3->4 transition they come from the MDO (since profiled invocations are
 * counted separately). Finally, if a method does not contain anything worth profiling, a transition
 * from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than
 * what is specified by Tier4InvocationThreshold).
 *
 * OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates.
 *
 * - Tier?LoadFeedback options are used to automatically scale the predicates described above depending
 * on the compiler load. The scaling coefficients are computed as follows:
 *
 * s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1,
 *
 * where queue_size_X is the current size of the compiler queue of level X, and compiler_count_X
 * is the number of level X compiler threads.
 *
 * Basically these parameters describe how many methods should be in the compile queue
 * per compiler thread before the scaling coefficient increases by one.
 *
 * This feedback provides the mechanism to automatically control the flow of compilation requests
 * depending on the machine speed, mutator load and other external factors.
 *
 * - Tier3DelayOn and Tier3DelayOff parameters control another important feedback loop.
 * Consider the following observation: a method compiled with full profiling (level 3)
 * is about 30% slower than a method at level 2 (just invocation and backedge counters, no MDO).
 * Normally, the following transitions will occur: 0->3->4. The problem arises when the C2 queue
 * gets congested and the 3->4 transition is delayed. While the method is in the C2 queue it continues
 * executing at level 3 for a much longer time than is required by the predicate and at suboptimal speed.
 * The idea is to dynamically change the behavior of the system in such a way that if a substantial
 * load on C2 is detected we would first do the 0->2 transition allowing a method to run faster.
 * And then when the load decreases to allow 2->3 transitions.
 *
 * Tier3Delay* parameters control this switching mechanism.
 * Tier3DelayOn is the number of methods in the C2 queue per compiler thread after which the policy
 * no longer does 0->3 transitions but does 0->2 transitions instead.
 * Tier3DelayOff switches the original behavior back when the number of methods in the C2 queue
 * per compiler thread falls below the specified amount.
 * The hysteresis is necessary to avoid jitter.
 *
 * - TieredCompileTaskTimeout is the amount of time an idle method can spend in the compile queue.
 * Basically, since we use the event rate d(i + b)/dt as a value of priority when selecting a method to
 * compile from the compile queue, we also can detect stale methods for which the rate has been
 * 0 for some time in the same iteration. Stale methods can appear in the queue when an application
 * abruptly changes its behavior.
 *
 * - TieredStopAtLevel is used mostly for testing. It allows bypassing the policy logic and sticking
 * to a given level. For example it's useful to set TieredStopAtLevel = 1 in order to compile everything
 * with pure c1.
 *
 * - Tier0ProfilingStartPercentage allows the interpreter to start profiling when the inequalities in the
 * 0->3 predicate are already exceeded by the given percentage but the level 3 version of the
 * method is still not ready. We can even go directly from level 0 to 4 if c1 doesn't produce a compiled
 * version in time. This reduces the overall transition to level 4 and decreases the startup time.
 * Note that this behavior is also guarded by the Tier3Delay mechanism: when the c2 queue is too long
 * there is no reason to start profiling prematurely.
 *
 * - TieredRateUpdateMinTime and TieredRateUpdateMaxTime are parameters of the rate computation.
 * Basically, the rate is not computed more frequently than TieredRateUpdateMinTime and is considered
 * to be zero if no events occurred in TieredRateUpdateMaxTime.
 */

// Implements the tiered compilation policy described in the comment above.
// All state and entry points are static (AllStatic); definitions live in
// compilationPolicy.cpp.
class CompilationPolicy : AllStatic {
  friend class CallPredicate;
  friend class LoopPredicate;

  // Moving average used to track compiler load over time.
  typedef CompilationPolicyUtils::WeightedMovingAverage<> LoadAverage;
  // Queue of classes whose training data is to be replayed (see replay_training_at_init*).
  typedef CompilationPolicyUtils::Queue<InstanceKlass> TrainingReplayQueue;

  static int64_t _start_time;                         // policy start timestamp (see set_start_time/start_time)
  static int _c1_count, _c2_count, _c3_count, _sc_count; // compiler thread counts per tier
  static double _increase_threshold_at_ratio;         // scale factor derived from IncreaseFirstTierCompileThresholdAt
  static LoadAverage _load_average;
  static volatile bool _recompilation_done;
  static TrainingReplayQueue _training_replay_queue;

  // Set carry flags in the counters (in Method* and MDO).
  inline static void handle_counter_overflow(const methodHandle& method);
#ifdef ASSERT
  // Verify that a level is consistent with the compilation mode
  static bool verify_level(CompLevel level);
#endif
  // Clamp the request level according to various constraints.
  inline static CompLevel limit_level(CompLevel level);
  // Common transition function. Given a predicate determines if a method should transition to another level.
  template<typename Predicate>
  static CompLevel common(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD, bool disable_feedback = false);

  // Per-current-level transition helpers used by common() (none = level 0,
  // limited profile = level 2, full profile = level 3).
  template<typename Predicate>
  static CompLevel transition_from_none(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback);
  template<typename Predicate>
  static CompLevel transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback);
  template<typename Predicate>
  static CompLevel transition_from_full_profile(const methodHandle& method, CompLevel cur_level);
  template<typename Predicate>
  static CompLevel standard_transition(const methodHandle& method, CompLevel cur_level, bool delayprof, bool disable_feedback);

  // Transition variants that consult recorded MethodTrainingData
  // (training-run replay); see definitions in compilationPolicy.cpp.
  static CompLevel trained_transition_from_none(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD);
  static CompLevel trained_transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD);
  static CompLevel trained_transition_from_full_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD);
  static CompLevel trained_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD);

  // Transition functions.
  // call_event determines if a method should be compiled at a different
  // level with a regular invocation entry.
  static CompLevel call_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD);
  // loop_event checks if a method should be OSR compiled at a different
  // level.
  static CompLevel loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD);
  // Diagnostic printing of a method's counters / training data, tagged with prefix.
  static void print_counters(const char* prefix, Method* m);
  static void print_training_data(const char* prefix, Method* method);
  // Has a method been around for long?
  // We don't remove old methods from the compile queue even if they have
  // very low activity (see select_task()).
  inline static bool is_old(const methodHandle& method);
  // Was a given method inactive for a given number of milliseconds.
  // If it is, we would remove it from the queue (see select_task()).
  inline static bool is_stale(int64_t t, int64_t timeout, const methodHandle& method);
  // Compute the weight of the method for the compilation scheduling
  inline static double weight(Method* method);
  // Apply heuristics and return true if x should be compiled before y
  inline static bool compare_methods(Method* x, Method* y);
  inline static bool compare_tasks(CompileTask* x, CompileTask* y);
  // Compute event rate for a given method. The rate is the number of events (invocations + backedges)
  // per millisecond.
  inline static void update_rate(int64_t t, const methodHandle& method);
  // Compute threshold scaling coefficient
  inline static double threshold_scale(CompLevel level, int feedback_k);
  // If a method is old enough and is still in the interpreter we would want to
  // start profiling without waiting for the compiled method to arrive. This function
  // determines whether we should do that.
  inline static bool should_create_mdo(const methodHandle& method, CompLevel cur_level);
  // Create MDO if necessary.
  static void create_mdo(const methodHandle& mh, JavaThread* THREAD);
  // Is method profiled enough?
  static bool is_method_profiled(const methodHandle& method);

  static void set_c1_count(int x) { _c1_count = x; }
  static void set_c2_count(int x) { _c2_count = x; }
  static void set_c3_count(int x) { _c3_count = x; }
  static void set_sc_count(int x) { _sc_count = x; }

  // Event kinds reported via print_event().
  enum EventType { CALL, LOOP, COMPILE, FORCE_COMPILE, FORCE_RECOMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
  static void print_event(EventType type, Method* m, Method* im, int bci, CompLevel level);
  // Check if the method can be compiled, change level if necessary
  static void compile(const methodHandle& mh, int bci, CompLevel level, TRAPS);
  // Simple methods are as good being compiled with C1 as C2.
  // This function tells if it's such a function.
  inline static bool is_trivial(const methodHandle& method);
  // Force method to be compiled at CompLevel_simple?
  inline static bool force_comp_at_level_simple(const methodHandle& method);

  // Get a compilation level for a given method.
  static CompLevel comp_level(Method* method);
  // Handle a counter notification for a regular invocation (call entry)
  // or a backedge (OSR) respectively; see event().
  static void method_invocation_event(const methodHandle& method, const methodHandle& inlinee,
                                      CompLevel level, nmethod* nm, TRAPS);
  static void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee,
                                       int bci, CompLevel level, nmethod* nm, TRAPS);

  // Precompute the threshold scale from IncreaseFirstTierCompileThresholdAt
  // (a percentage); assumes the flag value is < 100, otherwise this divides by zero.
  static void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); }
  static void set_start_time(int64_t t) { _start_time = t; }
  static int64_t start_time() { return _start_time; }

  // m must be compiled before executing it
  static bool must_be_compiled(const methodHandle& m, int comp_level = CompLevel_any);
  static void maybe_compile_early(const methodHandle& m, TRAPS);
  static void maybe_compile_early_after_init(const methodHandle& m, TRAPS);
  static void replay_training_at_init_impl(InstanceKlass* klass, TRAPS);
public:
  static int min_invocations() { return Tier4MinInvocationThreshold; }
  // Compiler thread counts per tier (set by initialize()).
  static int c1_count() { return _c1_count; }
  static int c2_count() { return _c2_count; }
  static int c3_count() { return _c3_count; }
  static int sc_count() { return _sc_count; }
  static int compiler_count(CompLevel comp_level);
  // If m must_be_compiled then request a compilation from the CompileBroker.
  // This supports the -Xcomp option.
  static void compile_if_required(const methodHandle& m, TRAPS);

  static void replay_training_at_init(bool is_on_shutdown, TRAPS);
  static void replay_training_at_init(InstanceKlass* klass, TRAPS);
  static void replay_training_at_init_loop(TRAPS);

  // m is allowed to be compiled
  static bool can_be_compiled(const methodHandle& m, int comp_level = CompLevel_any);
  // m is allowed to be osr compiled
  static bool can_be_osr_compiled(const methodHandle& m, int comp_level = CompLevel_any);
  static bool is_compilation_enabled();

  static CompileTask* select_task_helper(CompileQueue* compile_queue);
  // Reprofile the method at the given trap scope (regular or OSR variant).
  static void reprofile(ScopeDesc* trap_scope, bool is_osr);
  // Runtime entry point for counter notifications: branch_bci/bci identify a
  // backedge (OSR) event, otherwise it is a regular invocation event.
  static nmethod* event(const methodHandle& method, const methodHandle& inlinee,
                        int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS);
  // Select task is called by CompileBroker. We should return a task or nullptr.
  static CompileTask* select_task(CompileQueue* compile_queue, JavaThread* THREAD);
  // Tell the runtime if we think a given method is adequately profiled.
  static bool is_mature(MethodData* mdo);
  // Initialize: set compiler thread count
  static void initialize();
  static bool should_not_inline(ciEnv* env, ciMethod* callee);

  // Return desired initial compilation level for Xcomp
  static CompLevel initial_compile_level(const methodHandle& method);
  // Return highest level possible
  static CompLevel highest_compile_level();
  static void dump();

  static void sample_load_average();
  static bool have_recompilation_work();
  static bool recompilation_step(int step, TRAPS);
};

#endif // SHARE_COMPILER_COMPILATIONPOLICY_HPP