/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2020, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// ...
#include "utilities/macros.hpp"

int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_model2;
int VM_Version::_variant;
int VM_Version::_revision;
int VM_Version::_stepping;

int VM_Version::_zva_length;
int VM_Version::_dcache_line_size;
int VM_Version::_icache_line_size;
int VM_Version::_initial_sve_vector_length;
int VM_Version::_max_supported_sve_vector_length;
bool VM_Version::_rop_protection;
uintptr_t VM_Version::_pac_mask;

SpinWait VM_Version::_spin_wait;

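// Map the OnSpinWaitInst / OnSpinWaitInstCount flags onto the SpinWait
// descriptor that the code generators consult when emitting the
// Thread.onSpinWait() intrinsic.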
static SpinWait get_spin_wait_desc() {
  if (strcmp(OnSpinWaitInst, "nop") == 0) {
    return SpinWait(SpinWait::NOP, OnSpinWaitInstCount);
  } else if (strcmp(OnSpinWaitInst, "isb") == 0) {
    return SpinWait(SpinWait::ISB, OnSpinWaitInstCount);
  } else if (strcmp(OnSpinWaitInst, "yield") == 0) {
    return SpinWait(SpinWait::YIELD, OnSpinWaitInstCount);
  } else if (strcmp(OnSpinWaitInst, "none") != 0) {
    vm_exit_during_initialization("The options for OnSpinWaitInst are nop, isb, yield, and none", OnSpinWaitInst);
  }

  if (!FLAG_IS_DEFAULT(OnSpinWaitInstCount) && OnSpinWaitInstCount > 0) {
    vm_exit_during_initialization("OnSpinWaitInstCount cannot be used for OnSpinWaitInst 'none'");
  }

  return SpinWait{};
}

void VM_Version::initialize() {
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  _supports_atomic_getset8 = true;
  _supports_atomic_getadd8 = true;

  get_os_cpu_info();

  int dcache_line = VM_Version::dcache_line_size();

  // Limit AllocatePrefetchDistance so that it does not exceed the
  // static constraint of 512 defined in runtime/globals.hpp.
  if (FLAG_IS_DEFAULT(AllocatePrefetchDistance))
    FLAG_SET_DEFAULT(AllocatePrefetchDistance, MIN2(512, 3*dcache_line));

  if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize))
    FLAG_SET_DEFAULT(AllocatePrefetchStepSize, dcache_line);
  if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes))
  // ...
    _features |= CPU_A53MAC;
    if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) {
      FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false);
    }
  }

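  // The hexadecimal values checked below are CPU part numbers from the
  // MIDR_EL1 identification register; model_is() compares a given part number
  // against the values recorded for this machine (_model and _model2, since
  // big.LITTLE systems can report two different core types).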
  // Cortex A73
  if (_cpu == CPU_ARM && model_is(0xd09)) {
    if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) {
      FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1);
    }
    // A73 is faster with a short loop that is easy for speculative execution
    if (FLAG_IS_DEFAULT(UseSimpleArrayEquals)) {
      FLAG_SET_DEFAULT(UseSimpleArrayEquals, true);
    }
  }

  // Neoverse
  // N1: 0xd0c
  // N2: 0xd49
  // V1: 0xd40
  // V2: 0xd4f
  if (_cpu == CPU_ARM && (model_is(0xd0c) || model_is(0xd49) ||
                          model_is(0xd40) || model_is(0xd4f))) {
    if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
      FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
    }

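    // On these cores a single ISB is used as the spin-wait hint: it briefly
    // stalls the spinning thread, which tends to behave better than a stream
    // of NOPs while waiting (see the OnSpinWaitInst default below).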
    if (FLAG_IS_DEFAULT(OnSpinWaitInst)) {
      FLAG_SET_DEFAULT(OnSpinWaitInst, "isb");
    }

    if (FLAG_IS_DEFAULT(OnSpinWaitInstCount)) {
      FLAG_SET_DEFAULT(OnSpinWaitInstCount, 1);
    }
    if (FLAG_IS_DEFAULT(AlwaysMergeDMB)) {
      FLAG_SET_DEFAULT(AlwaysMergeDMB, false);
    }
  }

  if (_features & (CPU_FP | CPU_ASIMD)) {
    if (FLAG_IS_DEFAULT(UseSignumIntrinsic)) {
      FLAG_SET_DEFAULT(UseSignumIntrinsic, true);
    }
  }

  if (FLAG_IS_DEFAULT(UseCRC32)) {
    UseCRC32 = VM_Version::supports_crc32();
  }

  if (UseCRC32 && !VM_Version::supports_crc32()) {
    warning("UseCRC32 specified, but not supported on this CPU");
    FLAG_SET_DEFAULT(UseCRC32, false);
  }

  // Neoverse
  // V1: 0xd40
  // V2: 0xd4f
  if (_cpu == CPU_ARM && (model_is(0xd40) || model_is(0xd4f))) {
    if (FLAG_IS_DEFAULT(UseCryptoPmullForCRC32)) {
      FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, true);
    }
    if (FLAG_IS_DEFAULT(CodeEntryAlignment)) {
      FLAG_SET_DEFAULT(CodeEntryAlignment, 32);
    }
  }

  if (UseCryptoPmullForCRC32 && (!VM_Version::supports_pmull() || !VM_Version::supports_sha3() || !VM_Version::supports_crc32())) {
    warning("UseCryptoPmullForCRC32 specified, but not supported on this CPU");
    FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, false);
  }

  if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
    // ...
/*
 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2020, Red Hat Inc. All rights reserved.
 * Copyright 2025 Arm Limited and/or its affiliates.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// ...
#include "utilities/macros.hpp"

int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_model2;
int VM_Version::_variant;
int VM_Version::_revision;
int VM_Version::_stepping;

int VM_Version::_zva_length;
int VM_Version::_dcache_line_size;
int VM_Version::_icache_line_size;
int VM_Version::_initial_sve_vector_length;
int VM_Version::_max_supported_sve_vector_length;
bool VM_Version::_rop_protection;
uintptr_t VM_Version::_pac_mask;

SpinWait VM_Version::_spin_wait;

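// In this revision the OnSpinWaitInst string and OnSpinWaitInstCount are
// parsed by the SpinWait constructor itself; the only extra check needed here
// is that "sb" was not requested on a CPU without the SB (speculation
// barrier) instruction.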
static SpinWait get_spin_wait_desc() {
  SpinWait spin_wait(OnSpinWaitInst, OnSpinWaitInstCount);
  if (spin_wait.inst() == SpinWait::SB && !VM_Version::supports_sb()) {
    vm_exit_during_initialization("OnSpinWaitInst is SB but current CPU does not support SB instruction");
  }

  return spin_wait;
}

void VM_Version::initialize() {
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  _supports_atomic_getset8 = true;
  _supports_atomic_getadd8 = true;

  get_os_cpu_info();

  int dcache_line = VM_Version::dcache_line_size();

  // Limit AllocatePrefetchDistance so that it does not exceed the
  // static constraint of 512 defined in runtime/globals.hpp.
  if (FLAG_IS_DEFAULT(AllocatePrefetchDistance))
    FLAG_SET_DEFAULT(AllocatePrefetchDistance, MIN2(512, 3*dcache_line));

  if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize))
    FLAG_SET_DEFAULT(AllocatePrefetchStepSize, dcache_line);
  if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes))
  // ...
    _features |= CPU_A53MAC;
    if (FLAG_IS_DEFAULT(UseSIMDForArrayEquals)) {
      FLAG_SET_DEFAULT(UseSIMDForArrayEquals, false);
    }
  }

  // Cortex A73
  if (_cpu == CPU_ARM && model_is(0xd09)) {
    if (FLAG_IS_DEFAULT(SoftwarePrefetchHintDistance)) {
      FLAG_SET_DEFAULT(SoftwarePrefetchHintDistance, -1);
    }
    // A73 is faster with a short loop that is easy for speculative execution
    if (FLAG_IS_DEFAULT(UseSimpleArrayEquals)) {
      FLAG_SET_DEFAULT(UseSimpleArrayEquals, true);
    }
  }

  // Neoverse
  // N1: 0xd0c
  // N2: 0xd49
  // N3: 0xd8e
  // V1: 0xd40
  // V2: 0xd4f
  // V3: 0xd84
  if (_cpu == CPU_ARM && (model_is(0xd0c) || model_is(0xd49) ||
                          model_is(0xd40) || model_is(0xd4f) ||
                          model_is(0xd8e) || model_is(0xd84))) {
    if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
      FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
    }

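    // Prefer SB over ISB as the spin-wait hint on Neoverse V2 (0xd4f) when the
    // CPU implements FEAT_SB; the other Neoverse parts listed above keep the
    // single-ISB default.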
    if (FLAG_IS_DEFAULT(OnSpinWaitInst)) {
      if (model_is(0xd4f) && VM_Version::supports_sb()) {
        FLAG_SET_DEFAULT(OnSpinWaitInst, "sb");
      } else {
        FLAG_SET_DEFAULT(OnSpinWaitInst, "isb");
      }
    }

    if (FLAG_IS_DEFAULT(OnSpinWaitInstCount)) {
      FLAG_SET_DEFAULT(OnSpinWaitInstCount, 1);
    }
    if (FLAG_IS_DEFAULT(AlwaysMergeDMB)) {
      FLAG_SET_DEFAULT(AlwaysMergeDMB, false);
    }
  }

  if (_features & (CPU_FP | CPU_ASIMD)) {
    if (FLAG_IS_DEFAULT(UseSignumIntrinsic)) {
      FLAG_SET_DEFAULT(UseSignumIntrinsic, true);
    }
  }

  if (FLAG_IS_DEFAULT(UseCRC32)) {
    UseCRC32 = VM_Version::supports_crc32();
  }

  if (UseCRC32 && !VM_Version::supports_crc32()) {
    warning("UseCRC32 specified, but not supported on this CPU");
    FLAG_SET_DEFAULT(UseCRC32, false);
  }

  // Neoverse
  // V1: 0xd40
  // V2: 0xd4f
  // V3: 0xd84
  if (_cpu == CPU_ARM &&
      (model_is(0xd40) || model_is(0xd4f) || model_is(0xd84))) {
    if (FLAG_IS_DEFAULT(UseCryptoPmullForCRC32)) {
      FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, true);
    }
    if (FLAG_IS_DEFAULT(CodeEntryAlignment)) {
      FLAG_SET_DEFAULT(CodeEntryAlignment, 32);
    }
  }

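  // The PMULL-based CRC32 code also relies on the SHA3 (EOR3) instruction,
  // hence all three feature checks below before it stays enabled.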
  if (UseCryptoPmullForCRC32 && (!VM_Version::supports_pmull() || !VM_Version::supports_sha3() || !VM_Version::supports_crc32())) {
    warning("UseCryptoPmullForCRC32 specified, but not supported on this CPU");
    FLAG_SET_DEFAULT(UseCryptoPmullForCRC32, false);
  }

  if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, true);
  }

  if (UseVectorizedMismatchIntrinsic) {
    warning("UseVectorizedMismatchIntrinsic specified, but not available on this CPU.");
    FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
    // ...