1 /*
  2  * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef CPU_X86_GLOBALS_X86_HPP
 26 #define CPU_X86_GLOBALS_X86_HPP
 27 
 28 #include "utilities/globalDefinitions.hpp"
 29 #include "utilities/macros.hpp"
 30 
 31 // Sets the default values for platform dependent flags used by the runtime system.
 32 // (see globals.hpp)
 33 
define_pd_global(bool, ImplicitNullChecks,       true);  // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks,      false); // Not needed on x86.
define_pd_global(bool, UncommonNullCast,         true);  // Uncommon-trap nulls passed to check cast

// Defer generation of compiler stubs when C2 or JVMCI is present (speeds up startup).
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);

define_pd_global(uintx, CodeCacheSegmentSize,    64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
// See 4827828 for this change. There is no globals_core_i486.hpp. I can't
// assign a different value for C2 without touching a number of files. Use
// #ifdef to minimize the change as it's late in Mantis. -- FIXME.
// c1 doesn't have this problem because the fix to 4858033 assures us
// the vep is aligned at CodeEntryAlignment whereas c2 only aligns
// the uep and the vep doesn't get real alignment but just slops on by
// only assured that the entry instruction meets the 5 byte size requirement.
#if COMPILER2_OR_JVMCI
// C2/JVMCI: larger alignment (see the historical note above for why this is
// conditional rather than a single shared value).
define_pd_global(intx, CodeEntryAlignment,       32);
#else
define_pd_global(intx, CodeEntryAlignment,       16);
#endif // COMPILER2_OR_JVMCI
define_pd_global(intx, OptoLoopAlignment,        16);  // Byte alignment for C2 loop headers.
define_pd_global(intx, InlineSmallCode,          1000); // Max compiled-code size (bytes) still considered for inlining.
 55 
// Stack guard zone sizing, in pages. Yellow pages trigger a recoverable
// StackOverflowError; red pages are the last-resort guard (fatal if hit);
// reserved pages support @ReservedStackAccess critical sections.
// Windows needs an extra yellow page and no reserved page (platform-specific
// exception dispatch; see os_windows).
#define DEFAULT_STACK_YELLOW_PAGES (NOT_WINDOWS(2) WINDOWS_ONLY(3))
#define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_RESERVED_PAGES (NOT_WINDOWS(1) WINDOWS_ONLY(0))

// Lower bounds accepted for the corresponding -XX:Stack*Pages flags.
#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
#define MIN_STACK_RESERVED_PAGES (0)

#ifdef _LP64
// Java_java_net_SocketOutputStream_socketWrite0() uses a 64k buffer on the
// stack if compiled for unix and LP64. To pass stack overflow tests we need
// 20 shadow pages.
#define DEFAULT_STACK_SHADOW_PAGES (NOT_WIN64(20) WIN64_ONLY(8) DEBUG_ONLY(+4))
// For clients that never hit the large socket-write stack buffer, the
// minimum is allowed to be below the default.
#define MIN_STACK_SHADOW_PAGES (NOT_WIN64(10) WIN64_ONLY(8) DEBUG_ONLY(+4))
#else
// 32-bit: native frames are smaller, so fewer shadow pages suffice
// (debug builds get extra headroom for assertion code).
#define DEFAULT_STACK_SHADOW_PAGES (4 DEBUG_ONLY(+5))
#define MIN_STACK_SHADOW_PAGES DEFAULT_STACK_SHADOW_PAGES
#endif // _LP64
 76 
// Register the platform defaults computed above as the pd flag values.
define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);

// Loom/virtual-thread continuations are only implemented for 64-bit x86.
#ifdef _LP64
define_pd_global(bool, VMContinuations, true);
#else
define_pd_global(bool, VMContinuations, false);
#endif

// Interpreter bytecode rewriting (quickened bytecodes / fused pairs) is
// supported on x86.
define_pd_global(bool, RewriteBytecodes,     true);
define_pd_global(bool, RewriteFrequentPairs, true);

// Per-digit enablement of receiver/argument/return type profiling;
// see the TypeProfileLevel description in globals.hpp for the encoding.
define_pd_global(uintx, TypeProfileLevel, 111);

define_pd_global(bool, CompactStrings, true);

define_pd_global(bool, PreserveFramePointer, false);

// Threshold (bytes) below which array initialization is done with
// short inline code rather than a stub call.
define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);

// Valhalla: scalarized calling convention for inline (value) types is
// only implemented on 64-bit x86.
define_pd_global(bool, InlineTypePassFieldsAsArgs, LP64_ONLY(true) NOT_LP64(false));
define_pd_global(bool, InlineTypeReturnedAsFields, LP64_ONLY(true) NOT_LP64(false));
101 
// X-macro declaring all x86-specific JVM flags. The flag-registration
// machinery invokes ARCH_FLAGS with concrete definitions of develop/product/
// notproduct/range/constraint to declare, bound, and validate each flag.
// Defaults here are conservative baselines; many are overridden at startup
// by CPU feature detection (see vm_version_x86.cpp). NOTE: every line of
// the macro body must end with a backslash continuation.
#define ARCH_FLAGS(develop,                                                 \
                   product,                                                 \
                   notproduct,                                              \
                   range,                                                   \
                   constraint)                                              \
                                                                            \
  develop(bool, IEEEPrecision, true,                                        \
          "Enables IEEE precision (for INTEL only)")                        \
                                                                            \
  product(bool, UseStoreImmI16, true,                                       \
          "Use store immediate 16-bits value instruction on x86")           \
                                                                            \
  product(int, UseSSE, 4,                                                   \
          "Highest supported SSE instructions set on x86/x64")              \
          range(0, 4)                                                       \
                                                                            \
  product(int, UseAVX, 3,                                                   \
          "Highest supported AVX instructions set on x86/x64")              \
          range(0, 3)                                                       \
                                                                            \
  product(bool, UseKNLSetting, false, DIAGNOSTIC,                           \
          "Control whether Knights platform setting should be used")        \
                                                                            \
  product(bool, UseCLMUL, false,                                            \
          "Control whether CLMUL instructions can be used on x86/x64")      \
                                                                            \
  product(bool, UseIncDec, true, DIAGNOSTIC,                                \
          "Use INC, DEC instructions on x86")                               \
                                                                            \
  product(bool, UseNewLongLShift, false,                                    \
          "Use optimized bitwise shift left")                               \
                                                                            \
  product(bool, UseAddressNop, false,                                       \
          "Use '0F 1F [addr]' NOP instructions on x86 cpus")                \
                                                                            \
  product(bool, UseXmmLoadAndClearUpper, true,                              \
          "Load low part of XMM register and clear upper part")             \
                                                                            \
  product(bool, UseXmmRegToRegMoveAll, false,                               \
          "Copy all XMM register bits when moving value between registers") \
                                                                            \
  product(bool, UseXmmI2D, false,                                           \
          "Use SSE2 CVTDQ2PD instruction to convert Integer to Double")     \
                                                                            \
  product(bool, UseXmmI2F, false,                                           \
          "Use SSE2 CVTDQ2PS instruction to convert Integer to Float")      \
                                                                            \
  product(bool, UseUnalignedLoadStores, false,                              \
          "Use SSE2 MOVDQU instruction for Arraycopy")                      \
                                                                            \
  product(bool, UseXMMForObjInit, false,                                    \
          "Use XMM/YMM MOVDQU instruction for Object Initialization")       \
                                                                            \
  product(bool, UseFastStosb, false,                                        \
          "Use fast-string operation for zeroing: rep stosb")               \
                                                                            \
  /* Use Restricted Transactional Memory for lock eliding */                \
  product(bool, UseRTMLocking, false,                                       \
          "Enable RTM lock eliding for inflated locks in compiled code")    \
                                                                            \
  product(bool, UseRTMForStackLocks, false, EXPERIMENTAL,                   \
          "Enable RTM lock eliding for stack locks in compiled code")       \
                                                                            \
  product(bool, UseRTMDeopt, false,                                         \
          "Perform deopt and recompilation based on RTM abort ratio")       \
                                                                            \
  product(int, RTMRetryCount, 5,                                            \
          "Number of RTM retries on lock abort or busy")                    \
          range(0, max_jint)                                                \
                                                                            \
  product(int, RTMSpinLoopCount, 100, EXPERIMENTAL,                         \
          "Spin count for lock to become free before RTM retry")            \
          range(0, max_jint)                                                \
                                                                            \
  product(int, RTMAbortThreshold, 1000, EXPERIMENTAL,                       \
          "Calculate abort ratio after this number of aborts")              \
          range(0, max_jint)                                                \
                                                                            \
  product(int, RTMLockingThreshold, 10000, EXPERIMENTAL,                    \
          "Lock count at which to do RTM lock eliding without "             \
          "abort ratio calculation")                                        \
          range(0, max_jint)                                                \
                                                                            \
  product(int, RTMAbortRatio, 50, EXPERIMENTAL,                             \
          "Lock abort ratio at which to stop use RTM lock eliding")         \
          range(0, 100) /* natural range */                                 \
                                                                            \
  product(int, RTMTotalCountIncrRate, 64, EXPERIMENTAL,                     \
          "Increment total RTM attempted lock count once every n times")    \
          range(1, max_jint)                                                \
          constraint(RTMTotalCountIncrRateConstraintFunc,AfterErgo)         \
                                                                            \
  product(intx, RTMLockingCalculationDelay, 0, EXPERIMENTAL,                \
          "Number of milliseconds to wait before start calculating aborts " \
          "for RTM locking")                                                \
                                                                            \
  product(bool, UseRTMXendForLockBusy, true, EXPERIMENTAL,                  \
          "Use RTM Xend instead of Xabort when lock busy")                  \
                                                                            \
  /* assembler */                                                           \
  product(bool, UseCountLeadingZerosInstruction, false,                     \
          "Use count leading zeros instruction")                            \
                                                                            \
  product(bool, UseCountTrailingZerosInstruction, false,                    \
          "Use count trailing zeros instruction")                           \
                                                                            \
  product(bool, UseSSE42Intrinsics, false,                                  \
          "SSE4.2 versions of intrinsics")                                  \
                                                                            \
  product(bool, UseBMI1Instructions, false,                                 \
          "Use BMI1 instructions")                                          \
                                                                            \
  product(bool, UseBMI2Instructions, false,                                 \
          "Use BMI2 instructions")                                          \
                                                                            \
  product(bool, UseLibmIntrinsic, true, DIAGNOSTIC,                         \
          "Use Libm Intrinsics")                                            \
                                                                            \
  /* Autodetected, see vm_version_x86.cpp */                                \
  product(bool, EnableX86ECoreOpts, false, DIAGNOSTIC,                      \
          "Perform Ecore Optimization")                                     \
                                                                            \
  /* Minimum array size in bytes to use AVX512 intrinsics */                \
  /* for copy, inflate and fill which don't bail out early based on any */  \
  /* condition. When this value is set to zero compare operations like */   \
  /* compare, vectorizedMismatch, compress can also use AVX512 intrinsics.*/\
  product(int, AVX3Threshold, 4096, DIAGNOSTIC,                             \
             "Minimum array size in bytes to use AVX512 intrinsics"         \
             "for copy, inflate and fill. When this value is set as zero"   \
             "compare operations can also use AVX512 intrinsics.")          \
             range(0, max_jint)                                             \
             constraint(AVX3ThresholdConstraintFunc,AfterErgo)              \
                                                                            \
  product(bool, IntelJccErratumMitigation, true, DIAGNOSTIC,                \
             "Turn off JVM mitigations related to Intel micro code "        \
             "mitigations for the Intel JCC erratum")
238 
239 // end of ARCH_FLAGS
240 
241 #endif // CPU_X86_GLOBALS_X86_HPP