/*
 * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/aotCodeCache.hpp"
#include "code/codeCache.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "interpreter/invocationCounter.hpp"
#include "jvm_io.h"
#include "runtime/arguments.hpp"
#include "runtime/continuation.hpp"
#include "runtime/flags/jvmFlag.hpp"
#include "runtime/flags/jvmFlagAccess.hpp"
#include "runtime/flags/jvmFlagConstraintsCompiler.hpp"
#include "runtime/flags/jvmFlagLimit.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/defaultStream.hpp"

const char* compilertype2name_tab[compiler_number_of_types] = {
  "",
  "c1",
  "c2",
  "jvmci"
};

CompilationModeFlag::Mode CompilationModeFlag::_mode = CompilationModeFlag::Mode::NORMAL;

static void print_mode_unavailable(const char* mode_name, const char* reason) {
  warning("%s compilation mode unavailable because %s.", mode_name, reason);
}

bool CompilationModeFlag::initialize() {
  _mode = Mode::NORMAL;
  // During parsing we want to be very careful not to use any methods of CompilerConfig that depend on
  // CompilationModeFlag.
  if (CompilationMode != nullptr) {
    if (strcmp(CompilationMode, "default") == 0 || strcmp(CompilationMode, "normal") == 0) {
      assert(_mode == Mode::NORMAL, "Precondition");
    } else if (strcmp(CompilationMode, "quick-only") == 0) {
      if (!CompilerConfig::has_c1()) {
        print_mode_unavailable("quick-only", "there is no c1 present");
      } else {
        _mode = Mode::QUICK_ONLY;
      }
    } else if (strcmp(CompilationMode, "high-only") == 0) {
      if (!CompilerConfig::has_c2() && !CompilerConfig::is_jvmci_compiler()) {
        print_mode_unavailable("high-only", "there is no c2 or jvmci compiler present");
      } else {
        _mode = Mode::HIGH_ONLY;
      }
    } else if (strcmp(CompilationMode, "high-only-quick-internal") == 0) {
      if (!CompilerConfig::has_c1() || !CompilerConfig::is_jvmci_compiler()) {
        print_mode_unavailable("high-only-quick-internal", "c1 and the jvmci compiler are not both present");
      } else {
        _mode = Mode::HIGH_ONLY_QUICK_INTERNAL;
      }
    } else {
      print_error();
      return false;
    }
  }

  // Now that the flag is parsed, we can use any methods of CompilerConfig.
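  // A rough summary of the ergonomics below (the exact predicates live in
  // CompilerConfig): when no explicit CompilationMode survives parsing, a mode
  // is derived from the available compilers, approximately:
  //   C1 only at the lowest tier (e.g. -XX:TieredStopAtLevel=1)  -> QUICK_ONLY
  //   C2 or JVMCI as the sole compiler                           -> HIGH_ONLY
  //   JVMCI compiler plus C1 with -XX:-TieredCompilation         -> HIGH_ONLY_QUICK_INTERNAL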
  if (normal()) {
    if (CompilerConfig::is_c1_simple_only()) {
      _mode = Mode::QUICK_ONLY;
    } else if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
      _mode = Mode::HIGH_ONLY;
    } else if (CompilerConfig::is_jvmci_compiler_enabled() && CompilerConfig::is_c1_enabled() && !TieredCompilation) {
      warning("Disabling tiered compilation with non-native JVMCI compiler is not recommended, "
              "disabling intermediate compilation levels instead.");
      _mode = Mode::HIGH_ONLY_QUICK_INTERNAL;
    }
  }
  return true;
}

void CompilationModeFlag::print_error() {
  jio_fprintf(defaultStream::error_stream(), "Unsupported compilation mode '%s', available modes are:", CompilationMode);
  bool comma = false;
  if (CompilerConfig::has_c1()) {
    jio_fprintf(defaultStream::error_stream(), "%s quick-only", comma ? "," : "");
    comma = true;
  }
  if (CompilerConfig::has_c2() || CompilerConfig::has_jvmci()) {
    jio_fprintf(defaultStream::error_stream(), "%s high-only", comma ? "," : "");
    comma = true;
  }
  if (CompilerConfig::has_c1() && CompilerConfig::has_jvmci()) {
    jio_fprintf(defaultStream::error_stream(), "%s high-only-quick-internal", comma ? "," : "");
    comma = true;
  }
  jio_fprintf(defaultStream::error_stream(), "\n");
}

// Returns threshold scaled with CompileThresholdScaling
intx CompilerConfig::scaled_compile_threshold(intx threshold) {
  return scaled_compile_threshold(threshold, CompileThresholdScaling);
}

// Returns freq_log scaled with CompileThresholdScaling
intx CompilerConfig::scaled_freq_log(intx freq_log) {
  return scaled_freq_log(freq_log, CompileThresholdScaling);
}

// For XXXThreshold flags, which all have a valid range of [0 .. max_jint]
intx CompilerConfig::jvmflag_scaled_compile_threshold(intx threshold) {
  return MAX2((intx)0, MIN2(scaled_compile_threshold(threshold), (intx)max_jint));
}

// For XXXNotifyFreqLog flags, which all have a valid range of [0 .. 30]
intx CompilerConfig::jvmflag_scaled_freq_log(intx freq_log) {
  return MAX2((intx)0, MIN2(scaled_freq_log(freq_log), (intx)30));
}

// Returns threshold scaled with the value of scale.
// If scale < 0.0, threshold is returned without scaling.
intx CompilerConfig::scaled_compile_threshold(intx threshold, double scale) {
  assert(threshold >= 0, "must be");
  if (scale == 1.0 || scale < 0.0) {
    return threshold;
  } else {
    double v = threshold * scale;
    assert(v >= 0, "must be");
    if (g_isnan(v) || !g_isfinite(v)) {
      return max_intx;
    }
    int exp;
    (void) frexp(v, &exp);
    int max_exp = sizeof(intx) * BitsPerByte - 1;
    if (exp > max_exp) {
      return max_intx;
    }
    intx r = (intx)(v);
    assert(r >= 0, "must be");
    return r;
  }
}

// Returns freq_log scaled with the value of scale.
// Returned values are in the range of [0, InvocationCounter::number_of_count_bits + 1].
// If scale < 0.0, freq_log is returned without scaling.
intx CompilerConfig::scaled_freq_log(intx freq_log, double scale) {
  // Check if scaling is necessary or if negative value was specified.
  if (scale == 1.0 || scale < 0.0) {
    return freq_log;
  }
  // Check values to avoid calculating log2 of 0.
  if (scale == 0.0 || freq_log == 0) {
    return 0;
  }
  // Determine the maximum notification frequency value currently supported.
  // The largest mask value that the interpreter/C1 can handle is
  // of length InvocationCounter::number_of_count_bits. Mask values are always
  // one bit shorter than the value of the notification frequency. Set
  // max_freq_bits accordingly.
  int max_freq_bits = InvocationCounter::number_of_count_bits + 1;
  intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);

  if (scaled_freq == 0) {
    // Return 0 right away to avoid calculating log2 of 0.
    return 0;
  } else {
    return MIN2(log2i(scaled_freq), max_freq_bits);
  }
}

void CompilerConfig::set_client_emulation_mode_flags() {
  assert(has_c1(), "Must have C1 compiler present");
  CompilationModeFlag::set_quick_only();

  FLAG_SET_ERGO(ProfileInterpreter, false);
#if INCLUDE_JVMCI
  FLAG_SET_ERGO(EnableJVMCI, false);
  FLAG_SET_ERGO(UseJVMCICompiler, false);
#endif
  if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
    FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
  }
  if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
    FLAG_SET_ERGO(InitialCodeCacheSize, 160*K);
  }
  if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
    FLAG_SET_ERGO(ReservedCodeCacheSize, 32*M);
  }
  if (FLAG_IS_DEFAULT(NonProfiledCodeHeapSize)) {
    FLAG_SET_ERGO(NonProfiledCodeHeapSize, 27*M);
  }
  if (FLAG_IS_DEFAULT(ProfiledCodeHeapSize)) {
    FLAG_SET_ERGO(ProfiledCodeHeapSize, 0);
  }
  if (FLAG_IS_DEFAULT(NonNMethodCodeHeapSize)) {
    FLAG_SET_ERGO(NonNMethodCodeHeapSize, 5*M);
  }
  if (FLAG_IS_DEFAULT(CodeCacheExpansionSize)) {
    FLAG_SET_ERGO(CodeCacheExpansionSize, 32*K);
  }
  if (FLAG_IS_DEFAULT(MaxRAM)) {
    // Do not use FLAG_SET_ERGO to update MaxRAM, as this will impact
    // the heap setting done based on available phys_mem (see Arguments::set_heap_size).
    FLAG_SET_DEFAULT(MaxRAM, 1ULL*G);
  }
  if (FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_ERGO(CICompilerCount, 1);
  }
}

bool CompilerConfig::is_compilation_mode_selected() {
  return !FLAG_IS_DEFAULT(TieredCompilation) ||
         !FLAG_IS_DEFAULT(TieredStopAtLevel) ||
         !FLAG_IS_DEFAULT(CompilationMode)
         JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI)
                    || !FLAG_IS_DEFAULT(UseJVMCICompiler));
}

static bool check_legacy_flags() {
  JVMFlag* compile_threshold_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(CompileThreshold));
  if (JVMFlagAccess::check_constraint(compile_threshold_flag, JVMFlagLimit::get_constraint(compile_threshold_flag)->constraint_func(), false) != JVMFlag::SUCCESS) {
    return false;
  }
  JVMFlag* on_stack_replace_percentage_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(OnStackReplacePercentage));
  if (JVMFlagAccess::check_constraint(on_stack_replace_percentage_flag, JVMFlagLimit::get_constraint(on_stack_replace_percentage_flag)->constraint_func(), false) != JVMFlag::SUCCESS) {
    return false;
  }
  JVMFlag* interpreter_profile_percentage_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(InterpreterProfilePercentage));
  if (JVMFlagAccess::check_range(interpreter_profile_percentage_flag, false) != JVMFlag::SUCCESS) {
    return false;
  }
  return true;
}

void CompilerConfig::set_legacy_emulation_flags() {
  // Any legacy flags set?
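  // Worked example with hypothetical values: -XX:CompileThreshold=10000,
  // InterpreterProfilePercentage=33 and OnStackReplacePercentage=140 give
  //   profile_threshold     = 10000 * 33  / 100 = 3300
  //   osr_threshold         = 10000 * 140 / 100 = 14000
  //   osr_profile_threshold = 14000 * 33  / 100 = 4620
  // which the code below then copies into the corresponding tiered thresholds.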
  if (!FLAG_IS_DEFAULT(CompileThreshold) ||
      !FLAG_IS_DEFAULT(OnStackReplacePercentage) ||
      !FLAG_IS_DEFAULT(InterpreterProfilePercentage)) {
    if (CompilerConfig::is_c1_only() || CompilerConfig::is_c2_or_jvmci_compiler_only()) {
      // This function is called before these flags are validated. To avoid confusing the user with
      // extraneous error messages, we check the validity of these flags here and bail out if any of
      // them are invalid.
      if (!check_legacy_flags()) {
        return;
      }
      // Note, we do not scale CompileThreshold before this because the tiered flags are
      // all going to be scaled further in set_compilation_policy_flags().
      const intx threshold = CompileThreshold;
      const intx profile_threshold = threshold * InterpreterProfilePercentage / 100;
      const intx osr_threshold = threshold * OnStackReplacePercentage / 100;
      const intx osr_profile_threshold = osr_threshold * InterpreterProfilePercentage / 100;

      const intx threshold_log = log2i_graceful(CompilerConfig::is_c1_only() ? threshold : profile_threshold);
      const intx osr_threshold_log = log2i_graceful(CompilerConfig::is_c1_only() ? osr_threshold : osr_profile_threshold);

      if (Tier0InvokeNotifyFreqLog > threshold_log) {
        FLAG_SET_ERGO(Tier0InvokeNotifyFreqLog, MAX2<intx>(0, threshold_log));
      }

      // Note: Emulation oddity. The legacy policy limited the amount of callbacks from the
      // interpreter for backedge events to once every 1024 counter increments.
      // We simulate this behavior by limiting the backedge notification frequency to be
      // at least 2^10.
      if (Tier0BackedgeNotifyFreqLog > osr_threshold_log) {
        FLAG_SET_ERGO(Tier0BackedgeNotifyFreqLog, MAX2<intx>(10, osr_threshold_log));
      }
      // Adjust the tiered policy flags to approximate the legacy behavior.
      FLAG_SET_ERGO(Tier3InvocationThreshold, threshold);
      FLAG_SET_ERGO(Tier3MinInvocationThreshold, threshold);
      FLAG_SET_ERGO(Tier3CompileThreshold, threshold);
      FLAG_SET_ERGO(Tier3BackEdgeThreshold, osr_threshold);
      if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
        FLAG_SET_ERGO(Tier4InvocationThreshold, threshold);
        FLAG_SET_ERGO(Tier4MinInvocationThreshold, threshold);
        FLAG_SET_ERGO(Tier4CompileThreshold, threshold);
        FLAG_SET_ERGO(Tier4BackEdgeThreshold, osr_threshold);
        FLAG_SET_ERGO(Tier0ProfilingStartPercentage, InterpreterProfilePercentage);
      }
    } else {
      // Normal tiered mode, ignore legacy flags
    }
  }
  // Scale CompileThreshold
  // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves CompileThreshold unchanged.
  if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0 && CompileThreshold > 0) {
    intx scaled_value = scaled_compile_threshold(CompileThreshold);
    if (CompileThresholdConstraintFunc(scaled_value, true) != JVMFlag::VIOLATES_CONSTRAINT) {
      FLAG_SET_ERGO(CompileThreshold, scaled_value);
    }
  }
}


void CompilerConfig::set_compilation_policy_flags() {
  if (is_tiered()) {
    // Increase the code cache size - tiered compiles a lot more.
    if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
      FLAG_SET_ERGO(ReservedCodeCacheSize,
                    MIN2(CODE_CACHE_DEFAULT_LIMIT, (size_t)ReservedCodeCacheSize * 5));
    }
    // Enable SegmentedCodeCache if tiered compilation is enabled, ReservedCodeCacheSize >= 240M,
    // and the code cache contains at least 8 pages (segmentation disables the advantage of huge pages).
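    // Illustrative figures: with 4K pages the 8-page minimum is trivially met,
    // while with 1G huge pages it would require a reserved code cache of at
    // least 8G, so segmentation stays off and huge pages keep their benefit.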
    if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M &&
        8 * CodeCache::page_size() <= ReservedCodeCacheSize) {
      FLAG_SET_ERGO(SegmentedCodeCache, true);
    }
    if (Arguments::is_compiler_only()) { // -Xcomp
      // Be much more aggressive in tiered mode with -Xcomp and exercise C2 more.
      // We will first compile a level 3 version (C1 with full profiling), then do one invocation of it,
      // compile a level 4 (C2) version, and then continue executing it.
      if (FLAG_IS_DEFAULT(Tier3InvokeNotifyFreqLog)) {
        FLAG_SET_CMDLINE(Tier3InvokeNotifyFreqLog, 0);
      }
      if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
        FLAG_SET_CMDLINE(Tier4InvocationThreshold, 0);
      }
    }
  }

  // The current Leyden implementation requires SegmentedCodeCache: the archive-backed code
  // cache is initialized only in that case. Force SegmentedCodeCache if we are loading/storing
  // cached code. TODO: Resolve this in code cache initialization code.
  if (!SegmentedCodeCache && AOTCodeCache::is_caching_enabled()) {
    FLAG_SET_ERGO(SegmentedCodeCache, true);
    if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
      FLAG_SET_ERGO(ReservedCodeCacheSize,
                    MIN2(CODE_CACHE_DEFAULT_LIMIT, (size_t)ReservedCodeCacheSize * 5));
    }
  }

  if (CompileThresholdScaling < 0) {
    vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", nullptr);
  }

  if (CompilationModeFlag::disable_intermediate()) {
    if (FLAG_IS_DEFAULT(Tier0ProfilingStartPercentage)) {
      FLAG_SET_DEFAULT(Tier0ProfilingStartPercentage, 33);
    }

    if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
      FLAG_SET_DEFAULT(Tier4InvocationThreshold, 5000);
    }
    if (FLAG_IS_DEFAULT(Tier4MinInvocationThreshold)) {
      FLAG_SET_DEFAULT(Tier4MinInvocationThreshold, 600);
    }
    if (FLAG_IS_DEFAULT(Tier4CompileThreshold)) {
      FLAG_SET_DEFAULT(Tier4CompileThreshold, 10000);
    }
    if (FLAG_IS_DEFAULT(Tier4BackEdgeThreshold)) {
      FLAG_SET_DEFAULT(Tier4BackEdgeThreshold, 15000);
    }

    if (FLAG_IS_DEFAULT(Tier3InvocationThreshold)) {
      FLAG_SET_DEFAULT(Tier3InvocationThreshold, Tier4InvocationThreshold);
    }
    if (FLAG_IS_DEFAULT(Tier3MinInvocationThreshold)) {
      FLAG_SET_DEFAULT(Tier3MinInvocationThreshold, Tier4MinInvocationThreshold);
    }
    if (FLAG_IS_DEFAULT(Tier3CompileThreshold)) {
      FLAG_SET_DEFAULT(Tier3CompileThreshold, Tier4CompileThreshold);
    }
    if (FLAG_IS_DEFAULT(Tier3BackEdgeThreshold)) {
      FLAG_SET_DEFAULT(Tier3BackEdgeThreshold, Tier4BackEdgeThreshold);
    }
  }

  // Scale tiered compilation thresholds.
  // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves compilation thresholds unchanged.
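  // Illustrative example: -XX:CompileThresholdScaling=0.5 halves each threshold
  // (e.g. Tier4InvocationThreshold 5000 -> 2500, clamped to [0, max_jint]) and
  // lowers each notify frequency log by one, since scaling 2^n by 0.5 yields
  // 2^(n-1) (clamped to [0, 30]).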
  if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
    FLAG_SET_ERGO(Tier0InvokeNotifyFreqLog, jvmflag_scaled_freq_log(Tier0InvokeNotifyFreqLog));
    FLAG_SET_ERGO(Tier0BackedgeNotifyFreqLog, jvmflag_scaled_freq_log(Tier0BackedgeNotifyFreqLog));

    FLAG_SET_ERGO(Tier3InvocationThreshold, jvmflag_scaled_compile_threshold(Tier3InvocationThreshold));
    FLAG_SET_ERGO(Tier3MinInvocationThreshold, jvmflag_scaled_compile_threshold(Tier3MinInvocationThreshold));
    FLAG_SET_ERGO(Tier3CompileThreshold, jvmflag_scaled_compile_threshold(Tier3CompileThreshold));
    FLAG_SET_ERGO(Tier3BackEdgeThreshold, jvmflag_scaled_compile_threshold(Tier3BackEdgeThreshold));

    // Tier2{Invocation,MinInvocation,Compile,Backedge}Threshold should be scaled here
    // once these thresholds become supported.

    FLAG_SET_ERGO(Tier2InvokeNotifyFreqLog, jvmflag_scaled_freq_log(Tier2InvokeNotifyFreqLog));
    FLAG_SET_ERGO(Tier2BackedgeNotifyFreqLog, jvmflag_scaled_freq_log(Tier2BackedgeNotifyFreqLog));

    FLAG_SET_ERGO(Tier3InvokeNotifyFreqLog, jvmflag_scaled_freq_log(Tier3InvokeNotifyFreqLog));
    FLAG_SET_ERGO(Tier3BackedgeNotifyFreqLog, jvmflag_scaled_freq_log(Tier3BackedgeNotifyFreqLog));

    FLAG_SET_ERGO(Tier23InlineeNotifyFreqLog, jvmflag_scaled_freq_log(Tier23InlineeNotifyFreqLog));

    FLAG_SET_ERGO(Tier4InvocationThreshold, jvmflag_scaled_compile_threshold(Tier4InvocationThreshold));
    FLAG_SET_ERGO(Tier4MinInvocationThreshold, jvmflag_scaled_compile_threshold(Tier4MinInvocationThreshold));
    FLAG_SET_ERGO(Tier4CompileThreshold, jvmflag_scaled_compile_threshold(Tier4CompileThreshold));
    FLAG_SET_ERGO(Tier4BackEdgeThreshold, jvmflag_scaled_compile_threshold(Tier4BackEdgeThreshold));
  }

#ifdef COMPILER1
  // Reduce stack usage due to inlining of methods which require much stack.
  // (High tier compiler can inline better based on profiling information.)
  if (FLAG_IS_DEFAULT(C1InlineStackLimit) &&
      TieredStopAtLevel == CompLevel_full_optimization && !CompilerConfig::is_c1_only()) {
    FLAG_SET_DEFAULT(C1InlineStackLimit, 5);
  }
#endif

  if (CompilerConfig::is_tiered() && CompilerConfig::is_c2_enabled()) {
#ifdef COMPILER2
    // Some inlining tuning
#if defined(X86) || defined(AARCH64) || defined(RISCV64)
    if (FLAG_IS_DEFAULT(InlineSmallCode)) {
      FLAG_SET_DEFAULT(InlineSmallCode, 2500);
    }
#endif
#endif // COMPILER2
  }
}

#if INCLUDE_JVMCI
void CompilerConfig::set_jvmci_specific_flags() {
  if (UseJVMCICompiler) {
    if (FLAG_IS_DEFAULT(TypeProfileWidth)) {
      FLAG_SET_DEFAULT(TypeProfileWidth, 8);
    }
    if (FLAG_IS_DEFAULT(TypeProfileLevel)) {
      FLAG_SET_DEFAULT(TypeProfileLevel, 0);
    }

    if (UseJVMCINativeLibrary) {
      // SVM compiled code requires more stack space
      if (FLAG_IS_DEFAULT(CompilerThreadStackSize)) {
        // Duplicate logic in the implementations of os::create_thread
        // so that we can then double the computed stack size. Once
        // the stack size requirements of SVM are better understood,
        // this logic can be pushed down into os::create_thread.
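        // Sketch of the intended effect (platform defaults vary): if
        // CompilerThreadStackSize is 0, os::create_thread would fall back to
        // VMThreadStackSize, so doubling that fallback here approximates
        // "twice the platform default stack size".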
        int stack_size = CompilerThreadStackSize;
        if (stack_size == 0) {
          stack_size = VMThreadStackSize;
        }
        if (stack_size != 0) {
          FLAG_SET_DEFAULT(CompilerThreadStackSize, stack_size * 2);
        }
      }
    } else {
      // JVMCI needs values not less than defaults
      if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
        FLAG_SET_DEFAULT(ReservedCodeCacheSize, MAX2(64*M, ReservedCodeCacheSize));
      }
      if (FLAG_IS_DEFAULT(InitialCodeCacheSize)) {
        FLAG_SET_DEFAULT(InitialCodeCacheSize, MAX2(16*M, InitialCodeCacheSize));
      }
      if (FLAG_IS_DEFAULT(NewSizeThreadIncrease)) {
        FLAG_SET_DEFAULT(NewSizeThreadIncrease, MAX2(4*K, NewSizeThreadIncrease));
      }
      if (FLAG_IS_DEFAULT(Tier3DelayOn)) {
        // This effectively prevents the compile broker from scheduling tier 2
        // (i.e., limited C1 profiling) compilations instead of tier 3
        // (i.e., full C1 profiling) compilations when the tier 4 queue
        // backs up (which is quite likely when using a non-AOT compiled JVMCI
        // compiler). The observation based on jargraal is that the downside
        // of skipping full profiling is much worse for performance than the
        // queue backing up.
        FLAG_SET_DEFAULT(Tier3DelayOn, 100000);
      }
    } // !UseJVMCINativeLibrary
  } // UseJVMCICompiler
}
#endif // INCLUDE_JVMCI

bool CompilerConfig::check_args_consistency(bool status) {
  // Check lower bounds of the code cache
  // Template Interpreter code is approximately 3X larger in debug builds.
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (ReservedCodeCacheSize < InitialCodeCacheSize) {
    jio_fprintf(defaultStream::error_stream(),
                "Invalid ReservedCodeCacheSize: %dK. Must be at least InitialCodeCacheSize=%dK.\n",
                ReservedCodeCacheSize/K, InitialCodeCacheSize/K);
    status = false;
  } else if (ReservedCodeCacheSize < min_code_cache_size) {
    jio_fprintf(defaultStream::error_stream(),
                "Invalid ReservedCodeCacheSize=%dK. Must be at least %uK.\n", ReservedCodeCacheSize/K,
                min_code_cache_size/K);
    status = false;
  } else if (ReservedCodeCacheSize > CODE_CACHE_SIZE_LIMIT) {
    // Code cache size larger than CODE_CACHE_SIZE_LIMIT is not supported.
    jio_fprintf(defaultStream::error_stream(),
                "Invalid ReservedCodeCacheSize=%dM. Must be at most %uM.\n", ReservedCodeCacheSize/M,
                CODE_CACHE_SIZE_LIMIT/M);
    status = false;
  } else if (NonNMethodCodeHeapSize < min_code_cache_size) {
    jio_fprintf(defaultStream::error_stream(),
                "Invalid NonNMethodCodeHeapSize=%dK. Must be at least %uK.\n", NonNMethodCodeHeapSize/K,
                min_code_cache_size/K);
    status = false;
  }

#ifdef _LP64
  if (!FLAG_IS_DEFAULT(CICompilerCount) && !FLAG_IS_DEFAULT(CICompilerCountPerCPU) && CICompilerCountPerCPU) {
    warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
  }
#endif

  if (BackgroundCompilation && ReplayCompiles) {
    if (!FLAG_IS_DEFAULT(BackgroundCompilation)) {
      warning("BackgroundCompilation disabled due to ReplayCompiles option.");
    }
    FLAG_SET_CMDLINE(BackgroundCompilation, false);
  }

  if (CompilerConfig::is_interpreter_only()) {
    if (UseCompiler) {
      if (!FLAG_IS_DEFAULT(UseCompiler)) {
        warning("UseCompiler disabled due to -Xint.");
      }
      FLAG_SET_CMDLINE(UseCompiler, false);
    }
    if (ProfileInterpreter) {
      if (!FLAG_IS_DEFAULT(ProfileInterpreter)) {
        warning("ProfileInterpreter disabled due to -Xint.");
      }
      FLAG_SET_CMDLINE(ProfileInterpreter, false);
    }
    if (TieredCompilation) {
      if (!FLAG_IS_DEFAULT(TieredCompilation)) {
        warning("TieredCompilation disabled due to -Xint.");
      }
      FLAG_SET_CMDLINE(TieredCompilation, false);
    }
    if (SegmentedCodeCache) {
      warning("SegmentedCodeCache has no meaningful effect with -Xint");
      FLAG_SET_DEFAULT(SegmentedCodeCache, false);
    }
#if INCLUDE_JVMCI
    if (EnableJVMCI || UseJVMCICompiler) {
      if (!FLAG_IS_DEFAULT(EnableJVMCI) || !FLAG_IS_DEFAULT(UseJVMCICompiler)) {
        warning("JVMCI Compiler disabled due to -Xint.");
      }
      FLAG_SET_CMDLINE(EnableJVMCI, false);
      FLAG_SET_CMDLINE(UseJVMCICompiler, false);
    }
#endif
  } else {
#if INCLUDE_JVMCI
    status = status && JVMCIGlobals::check_jvmci_flags_are_consistent();
#endif
  }

  return status;
}

void CompilerConfig::ergo_initialize() {
#if !COMPILER1_OR_COMPILER2
  return;
#endif

  if (has_c1()) {
    if (!is_compilation_mode_selected()) {
      if (NeverActAsServerClassMachine) {
        set_client_emulation_mode_flags();
      }
    } else if (!has_c2() && !is_jvmci_compiler()) {
      set_client_emulation_mode_flags();
    }
  }

  set_legacy_emulation_flags();
  set_compilation_policy_flags();

#if INCLUDE_JVMCI
  // Check that JVMCI supports selected GC.
  // Should be done after GCConfig::initialize() was called.
  JVMCIGlobals::check_jvmci_supported_gc();

  // Do JVMCI specific settings
  set_jvmci_specific_flags();
#endif

  if (PreloadOnly) {
    // Disable profiling/counter updates in interpreter and C1.
    // This effectively disables most of the normal JIT (re-)compilations.
    FLAG_SET_DEFAULT(ProfileInterpreter, false);
    FLAG_SET_DEFAULT(UseOnStackReplacement, false);
    FLAG_SET_DEFAULT(UseLoopCounter, false);

    // Disable compilations through training data replay.
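    // (AOTReplayTraining replays recorded training data to trigger compilations;
    // under PreloadOnly only the preloaded code is wanted, so replay is turned
    // off as well.)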
    FLAG_SET_DEFAULT(AOTReplayTraining, false);
  }

  if (UseOnStackReplacement && !UseLoopCounter) {
    warning("On-stack-replacement requires loop counters; enabling loop counters");
    FLAG_SET_DEFAULT(UseLoopCounter, true);
  }

  if (ProfileInterpreter && CompilerConfig::is_c1_simple_only()) {
    if (!FLAG_IS_DEFAULT(ProfileInterpreter)) {
      warning("ProfileInterpreter disabled due to client emulation mode");
    }
    FLAG_SET_CMDLINE(ProfileInterpreter, false);
  }

#ifdef COMPILER2
  if (!EliminateLocks) {
    EliminateNestedLocks = false;
  }
  if (!Inline || !IncrementalInline) {
    IncrementalInline = false;
    IncrementalInlineMH = false;
    IncrementalInlineVirtual = false;
    StressIncrementalInlining = false;
  }
#ifndef PRODUCT
  if (!IncrementalInline) {
    AlwaysIncrementalInline = false;
  }
  if (FLAG_IS_CMDLINE(PrintIdealGraph) && !PrintIdealGraph) {
    FLAG_SET_ERGO(PrintIdealGraphLevel, -1);
  }
#endif
  if (!UseTypeSpeculation && FLAG_IS_DEFAULT(TypeProfileLevel)) {
    // Nothing is using the profiling, turn it off.
    FLAG_SET_DEFAULT(TypeProfileLevel, 0);
  }
  if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
    FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
  }
  if (FLAG_IS_DEFAULT(LoopStripMiningIterShortLoop)) {
    // blind guess
    LoopStripMiningIterShortLoop = LoopStripMiningIter / 10;
  }
#endif // COMPILER2
}