1 /*
2 * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_GLOBALS_X86_HPP
26 #define CPU_X86_GLOBALS_X86_HPP
27
28 #include "utilities/globalDefinitions.hpp"
29 #include "utilities/macros.hpp"
30
31 // Sets the default values for platform dependent flags used by the runtime system.
32 // (see globals.hpp)
33
// Null-check strategy: x86 uses implicit (fault-based) null checks, so no
// explicit trap instructions are needed (TrapBasedNullChecks is off).
define_pd_global(bool, ImplicitNullChecks, true); // Generate code for implicit null checks
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on x86.
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap nulls passed to check cast

// Delay compiler stub generation only when C2 or JVMCI is built in.
define_pd_global(bool, DelayCompilerStubsGeneration, COMPILER2_OR_JVMCI);
39
// Code cache segment size: 64 bytes, doubled when both C1 and C2 are present.
define_pd_global(size_t, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
// Historical note — see 4827828 for this change. There is no globals_core_i486.hpp.
// A different value for C2 could not be assigned without touching a number of
// files, so an #ifdef is used to minimize the change (late in Mantis). -- FIXME.
// c1 doesn't have this problem because the fix to 4858033 assures us
// the vep is aligned at CodeEntryAlignment whereas c2 only aligns
// the uep and the vep doesn't get real alignment but just slops on by,
// only assured that the entry instruction meets the 5 byte size requirement.
#if COMPILER2_OR_JVMCI
define_pd_global(intx, CodeEntryAlignment, 32);
#else
define_pd_global(intx, CodeEntryAlignment, 16);
#endif // COMPILER2_OR_JVMCI
// Alignment applied by C2 to loop heads (see globals.hpp for flag semantics).
define_pd_global(intx, OptoLoopAlignment, 16);
// Size threshold (bytes) below which compiled code counts as "small" for inlining.
define_pd_global(intx, InlineSmallCode, 1000);
55
// Stack guard-zone sizing, in pages. Windows gets one extra yellow page and
// no reserved page (see the NOT_WINDOWS/WINDOWS_ONLY selections below).
#define DEFAULT_STACK_YELLOW_PAGES (NOT_WINDOWS(2) WINDOWS_ONLY(3))
#define DEFAULT_STACK_RED_PAGES (1)
#define DEFAULT_STACK_RESERVED_PAGES (NOT_WINDOWS(1) WINDOWS_ONLY(0))

// MIN_* values serve as the lower bounds for the corresponding page counts.
#define MIN_STACK_YELLOW_PAGES DEFAULT_STACK_YELLOW_PAGES
#define MIN_STACK_RED_PAGES DEFAULT_STACK_RED_PAGES
#define MIN_STACK_RESERVED_PAGES (0)

// Java_java_net_SocketOutputStream_socketWrite0() uses a 64k buffer on the
// stack if compiled for unix. To pass stack overflow tests we need 20 shadow pages.
// Debug builds get 4 extra pages (DEBUG_ONLY(+4)) to absorb larger frames.
#define DEFAULT_STACK_SHADOW_PAGES (NOT_WIN64(20) WIN64_ONLY(8) DEBUG_ONLY(+4))
// For those clients that do not use write socket, we allow
// the min range value to be below that of the default.
#define MIN_STACK_SHADOW_PAGES (NOT_WIN64(10) WIN64_ONLY(8) DEBUG_ONLY(+4))

define_pd_global(intx, StackYellowPages, DEFAULT_STACK_YELLOW_PAGES);
define_pd_global(intx, StackRedPages, DEFAULT_STACK_RED_PAGES);
define_pd_global(intx, StackShadowPages, DEFAULT_STACK_SHADOW_PAGES);
define_pd_global(intx, StackReservedPages, DEFAULT_STACK_RESERVED_PAGES);
75
// VM-level (Loom) continuations are supported on this platform.
define_pd_global(bool, VMContinuations, true);

// Interpreter bytecode rewriting (quickening) is enabled on x86.
define_pd_global(bool, RewriteBytecodes, true);
define_pd_global(bool, RewriteFrequentPairs, true);

// Type profile level: each decimal digit enables one profiling kind
// (see the TypeProfileLevel description in globals.hpp).
define_pd_global(uintx, TypeProfileLevel, 111);

define_pd_global(bool, CompactStrings, true);

// By default, do not reserve a frame pointer register.
define_pd_global(bool, PreserveFramePointer, false);

// Arrays up to this many bytes are initialized inline rather than via stub.
define_pd_global(intx, InitArrayShortSize, 8*BytesPerLong);
88
// ARCH_FLAGS is an X-macro: callers expand it with concrete definitions of
// develop/product/range/constraint (see runtime/globals.hpp) to declare all
// x86-specific VM flags in a single place. Comments inside the macro body
// must use /* */ so they survive the line continuations.
// Fix: the AVX3Threshold help text was built from adjacent string literals
// that lacked separating spaces ("...intrinsicsfor copy...", "...zerocompare...").
#define ARCH_FLAGS(develop,                                                 \
                   product,                                                 \
                   range,                                                   \
                   constraint)                                              \
                                                                            \
  develop(bool, IEEEPrecision, true,                                        \
          "Enables IEEE precision (for INTEL only)")                        \
                                                                            \
  product(bool, UseStoreImmI16, true,                                       \
          "Use store immediate 16-bits value instruction on x86")           \
                                                                            \
  product(int, UseSSE, 4,                                                   \
          "Highest supported SSE instructions set on x86/x64")              \
          range(0, 4)                                                       \
                                                                            \
  product(int, UseAVX, 3,                                                   \
          "Highest supported AVX instructions set on x86/x64")              \
          range(0, 3)                                                       \
                                                                            \
  product(bool, UseAPX, false, EXPERIMENTAL,                                \
          "Use Intel Advanced Performance Extensions")                      \
                                                                            \
  product(bool, UseKNLSetting, false, DIAGNOSTIC,                           \
          "Control whether Knights platform setting should be used")        \
                                                                            \
  product(bool, UseCLMUL, false,                                            \
          "Control whether CLMUL instructions can be used on x86/x64")      \
                                                                            \
  product(bool, UseIncDec, true, DIAGNOSTIC,                                \
          "Use INC, DEC instructions on x86")                               \
                                                                            \
  product(bool, UseNewLongLShift, false,                                    \
          "Use optimized bitwise shift left")                               \
                                                                            \
  product(bool, UseAddressNop, false,                                       \
          "Use '0F 1F [addr]' NOP instructions on x86 cpus")                \
                                                                            \
  product(bool, UseXmmLoadAndClearUpper, true,                              \
          "Load low part of XMM register and clear upper part")             \
                                                                            \
  product(bool, UseXmmRegToRegMoveAll, false,                               \
          "Copy all XMM register bits when moving value between registers") \
                                                                            \
  product(bool, UseXmmI2D, false,                                           \
          "Use SSE2 CVTDQ2PD instruction to convert Integer to Double")     \
                                                                            \
  product(bool, UseXmmI2F, false,                                           \
          "Use SSE2 CVTDQ2PS instruction to convert Integer to Float")      \
                                                                            \
  product(bool, UseUnalignedLoadStores, false,                              \
          "Use SSE2 MOVDQU instruction for Arraycopy")                      \
                                                                            \
  product(bool, UseXMMForObjInit, false,                                    \
          "Use XMM/YMM MOVDQU instruction for Object Initialization")       \
                                                                            \
  product(bool, UseFastStosb, false,                                        \
          "Use fast-string operation for zeroing: rep stosb")               \
                                                                            \
  /* assembler */                                                           \
  product(bool, UseCountLeadingZerosInstruction, false,                     \
          "Use count leading zeros instruction")                            \
                                                                            \
  product(bool, UseCountTrailingZerosInstruction, false,                    \
          "Use count trailing zeros instruction")                           \
                                                                            \
  product(bool, UseSSE42Intrinsics, false,                                  \
          "SSE4.2 versions of intrinsics")                                  \
                                                                            \
  product(bool, UseBMI1Instructions, false,                                 \
          "Use BMI1 instructions")                                          \
                                                                            \
  product(bool, UseBMI2Instructions, false,                                 \
          "Use BMI2 instructions")                                          \
                                                                            \
  product(bool, UseLibmIntrinsic, true, DIAGNOSTIC,                         \
          "Use Libm Intrinsics")                                            \
                                                                            \
  /* Autodetected, see vm_version_x86.cpp */                                \
  product(bool, EnableX86ECoreOpts, false, DIAGNOSTIC,                      \
          "Perform Ecore Optimization")                                     \
                                                                            \
  /* Minimum array size in bytes to use AVX512 intrinsics */                \
  /* for copy, inflate and fill which don't bail out early based on any */  \
  /* condition. When this value is set to zero compare operations like */   \
  /* compare, vectorizedMismatch, compress can also use AVX512 intrinsics.*/\
  product(int, AVX3Threshold, 4096, DIAGNOSTIC,                             \
          "Minimum array size in bytes to use AVX512 intrinsics "           \
          "for copy, inflate and fill. When this value is set as zero "     \
          "compare operations can also use AVX512 intrinsics.")             \
          range(0, max_jint)                                                \
          constraint(AVX3ThresholdConstraintFunc, AfterErgo)                \
                                                                            \
  product(bool, IntelJccErratumMitigation, true, DIAGNOSTIC,                \
          "Turn off JVM mitigations related to Intel micro code "           \
          "mitigations for the Intel JCC erratum")                          \
                                                                            \
  product(int, X86ICacheSync, -1, DIAGNOSTIC,                               \
          "Select the X86 ICache sync mechanism: -1 = auto-select; "        \
          "0 = none (dangerous); 1 = CLFLUSH loop; 2 = CLFLUSHOPT loop; "   \
          "3 = CLWB loop; 4 = single CPUID; 5 = single SERIALIZE. "         \
          "Explicitly selected mechanism will fail at startup if "          \
          "hardware does not support it.")                                  \
          range(-1, 5)                                                      \
                                                                            \
// end of ARCH_FLAGS
194
195 #endif // CPU_X86_GLOBALS_X86_HPP