1 /*
2 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "asm/macroAssembler.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "classfile/vmIntrinsics.hpp"
28 #include "code/codeBlob.hpp"
29 #include "compiler/compilerDefinitions.inline.hpp"
30 #include "jvm.h"
31 #include "logging/log.hpp"
32 #include "logging/logStream.hpp"
33 #include "memory/resourceArea.hpp"
34 #include "memory/universe.hpp"
35 #include "runtime/globals_extension.hpp"
36 #include "runtime/java.hpp"
37 #include "runtime/os.inline.hpp"
38 #include "runtime/stubCodeGenerator.hpp"
39 #include "runtime/vm_version.hpp"
40 #include "utilities/checkedCast.hpp"
41 #include "utilities/ostream.hpp"
42 #include "utilities/powerOfTwo.hpp"
43 #include "utilities/virtualizationSupport.hpp"
44
45 int VM_Version::_cpu;
46 int VM_Version::_model;
47 int VM_Version::_stepping;
48 bool VM_Version::_has_intel_jcc_erratum;
49 VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };
50
51 #define DECLARE_CPU_FEATURE_NAME(id, name, bit) XSTR(name),
52 const char* VM_Version::_features_names[] = { CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_NAME)};
53 #undef DECLARE_CPU_FEATURE_NAME
54
55 // Address of instruction which causes SEGV
56 address VM_Version::_cpuinfo_segv_addr = nullptr;
57 // Address of instruction after the one which causes SEGV
58 address VM_Version::_cpuinfo_cont_addr = nullptr;
59 // Address of instruction which causes APX specific SEGV
60 address VM_Version::_cpuinfo_segv_addr_apx = nullptr;
61 // Address of instruction after the one which causes APX specific SEGV
62 address VM_Version::_cpuinfo_cont_addr_apx = nullptr;
63
64 static BufferBlob* stub_blob;
65 static const int stub_size = 2550;
66
67 int VM_Version::VM_Features::_features_bitmap_size = sizeof(VM_Version::VM_Features::_features_bitmap) / BytesPerLong;
68
69 VM_Version::VM_Features VM_Version::_features;
70 VM_Version::VM_Features VM_Version::_cpu_features;
71
72 extern "C" {
73 typedef void (*get_cpu_info_stub_t)(void*);
74 typedef void (*detect_virt_stub_t)(uint32_t, uint32_t*);
75 typedef void (*clear_apx_test_state_t)(void);
76 typedef void (*getCPUIDBrandString_stub_t)(void*);
77 }
78 static get_cpu_info_stub_t get_cpu_info_stub = nullptr;
79 static detect_virt_stub_t detect_virt_stub = nullptr;
80 static clear_apx_test_state_t clear_apx_test_state_stub = nullptr;
81 static getCPUIDBrandString_stub_t getCPUIDBrandString_stub = nullptr;
82
83 bool VM_Version::supports_clflush() {
84 // clflush should always be available on x86_64
85 // if not we are in real trouble because we rely on it
86 // to flush the code cache.
87 // Unfortunately, Assembler::clflush is currently called as part
88 // of generation of the code cache flush routine. This happens
89 // under Universe::init before the processor features are set
90 // up. Assembler::flush calls this routine to check that clflush
91 // is allowed. So, we give the caller a free pass if Universe init
92 // is still in progress.
93 assert ((!Universe::is_fully_initialized() || _features.supports_feature(CPU_FLUSH)), "clflush should be available");
94 return true;
95 }
96
97 #define CPUID_STANDARD_FN 0x0
98 #define CPUID_STANDARD_FN_1 0x1
99 #define CPUID_STANDARD_FN_4 0x4
100 #define CPUID_STANDARD_FN_B 0xb
101
102 #define CPUID_EXTENDED_FN 0x80000000
103 #define CPUID_EXTENDED_FN_1 0x80000001
104 #define CPUID_EXTENDED_FN_2 0x80000002
105 #define CPUID_EXTENDED_FN_3 0x80000003
106 #define CPUID_EXTENDED_FN_4 0x80000004
107 #define CPUID_EXTENDED_FN_7 0x80000007
108 #define CPUID_EXTENDED_FN_8 0x80000008
109
110 class VM_Version_StubGenerator: public StubCodeGenerator {
111 public:
112
113 VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}
114
115 address clear_apx_test_state() {
116 # define __ _masm->
117 address start = __ pc();
118 // EGPRs are call clobbered registers, Explicit clearing of r16 and r31 during signal
119 // handling guarantees that preserved register values post signal handling were
120 // re-instantiated by operating system and not because they were not modified externally.
121
122 bool save_apx = UseAPX;
123 VM_Version::set_apx_cpuFeatures();
124 UseAPX = true;
125 // EGPR state save/restoration.
126 __ mov64(r16, 0L);
127 __ mov64(r31, 0L);
128 UseAPX = save_apx;
129 VM_Version::clean_cpuFeatures();
130 __ ret(0);
131 return start;
132 }
133
134 address generate_get_cpu_info() {
135 // Flags to test CPU type.
136 const uint32_t HS_EFL_AC = 0x40000;
137 const uint32_t HS_EFL_ID = 0x200000;
138 // Values for when we don't have a CPUID instruction.
139 const int CPU_FAMILY_SHIFT = 8;
140 const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
141 const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
142 bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);
143
144 Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4, std_cpuid24, std_cpuid29;
145 Label sef_cpuid, sefsl1_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7;
146 Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning, apx_xstate;
147 Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;
148
149 StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
150 # define __ _masm->
151
152 address start = __ pc();
153
154 //
155 // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
156 //
157 // rcx and rdx are first and second argument registers on windows
158
159 __ push(rbp);
160 __ mov(rbp, c_rarg0); // cpuid_info address
161 __ push(rbx);
162 __ push(rsi);
163 __ pushf(); // preserve rbx, and flags
164 __ pop(rax);
165 __ push(rax);
166 __ mov(rcx, rax);
167 //
168 // if we are unable to change the AC flag, we have a 386
169 //
170 __ xorl(rax, HS_EFL_AC);
171 __ push(rax);
172 __ popf();
173 __ pushf();
174 __ pop(rax);
175 __ cmpptr(rax, rcx);
176 __ jccb(Assembler::notEqual, detect_486);
177
178 __ movl(rax, CPU_FAMILY_386);
179 __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
180 __ jmp(done);
181
182 //
183 // If we are unable to change the ID flag, we have a 486 which does
184 // not support the "cpuid" instruction.
185 //
186 __ bind(detect_486);
187 __ mov(rax, rcx);
188 __ xorl(rax, HS_EFL_ID);
189 __ push(rax);
190 __ popf();
191 __ pushf();
192 __ pop(rax);
193 __ cmpptr(rcx, rax);
194 __ jccb(Assembler::notEqual, detect_586);
195
196 __ bind(cpu486);
197 __ movl(rax, CPU_FAMILY_486);
198 __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
199 __ jmp(done);
200
201 //
202 // At this point, we have a chip which supports the "cpuid" instruction
203 //
204 __ bind(detect_586);
205 __ xorl(rax, rax);
206 __ cpuid();
207 __ orl(rax, rax);
208 __ jcc(Assembler::equal, cpu486); // if cpuid doesn't support an input
209 // value of at least 1, we give up and
210 // assume a 486
211 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
212 __ movl(Address(rsi, 0), rax);
213 __ movl(Address(rsi, 4), rbx);
214 __ movl(Address(rsi, 8), rcx);
215 __ movl(Address(rsi,12), rdx);
216
217 __ cmpl(rax, 0xa); // Is cpuid(0xB) supported?
218 __ jccb(Assembler::belowEqual, std_cpuid4);
219
220 //
221 // cpuid(0xB) Processor Topology
222 //
223 __ movl(rax, 0xb);
224 __ xorl(rcx, rcx); // Threads level
225 __ cpuid();
226
227 __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
228 __ movl(Address(rsi, 0), rax);
229 __ movl(Address(rsi, 4), rbx);
230 __ movl(Address(rsi, 8), rcx);
231 __ movl(Address(rsi,12), rdx);
232
233 __ movl(rax, 0xb);
234 __ movl(rcx, 1); // Cores level
235 __ cpuid();
236 __ push(rax);
237 __ andl(rax, 0x1f); // Determine if valid topology level
238 __ orl(rax, rbx); // eax[4:0] | ebx[0:15] == 0 indicates invalid level
239 __ andl(rax, 0xffff);
240 __ pop(rax);
241 __ jccb(Assembler::equal, std_cpuid4);
242
243 __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
244 __ movl(Address(rsi, 0), rax);
245 __ movl(Address(rsi, 4), rbx);
246 __ movl(Address(rsi, 8), rcx);
247 __ movl(Address(rsi,12), rdx);
248
249 __ movl(rax, 0xb);
250 __ movl(rcx, 2); // Packages level
251 __ cpuid();
252 __ push(rax);
253 __ andl(rax, 0x1f); // Determine if valid topology level
254 __ orl(rax, rbx); // eax[4:0] | ebx[0:15] == 0 indicates invalid level
255 __ andl(rax, 0xffff);
256 __ pop(rax);
257 __ jccb(Assembler::equal, std_cpuid4);
258
259 __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
260 __ movl(Address(rsi, 0), rax);
261 __ movl(Address(rsi, 4), rbx);
262 __ movl(Address(rsi, 8), rcx);
263 __ movl(Address(rsi,12), rdx);
264
265 //
266 // cpuid(0x4) Deterministic cache params
267 //
268 __ bind(std_cpuid4);
269 __ movl(rax, 4);
270 __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
271 __ jccb(Assembler::greater, std_cpuid1);
272
273 __ xorl(rcx, rcx); // L1 cache
274 __ cpuid();
275 __ push(rax);
276 __ andl(rax, 0x1f); // Determine if valid cache parameters used
277 __ orl(rax, rax); // eax[4:0] == 0 indicates invalid cache
278 __ pop(rax);
279 __ jccb(Assembler::equal, std_cpuid1);
280
281 __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
282 __ movl(Address(rsi, 0), rax);
283 __ movl(Address(rsi, 4), rbx);
284 __ movl(Address(rsi, 8), rcx);
285 __ movl(Address(rsi,12), rdx);
286
287 //
288 // Standard cpuid(0x1)
289 //
290 __ bind(std_cpuid1);
291 __ movl(rax, 1);
292 __ cpuid();
293 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
294 __ movl(Address(rsi, 0), rax);
295 __ movl(Address(rsi, 4), rbx);
296 __ movl(Address(rsi, 8), rcx);
297 __ movl(Address(rsi,12), rdx);
298
299 //
300 // Check if OS has enabled XGETBV instruction to access XCR0
301 // (OSXSAVE feature flag) and CPU supports AVX
302 //
303 __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
304 __ cmpl(rcx, 0x18000000);
305 __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported
306
307 //
308 // XCR0, XFEATURE_ENABLED_MASK register
309 //
310 __ xorl(rcx, rcx); // zero for XCR0 register
311 __ xgetbv();
312 __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
313 __ movl(Address(rsi, 0), rax);
314 __ movl(Address(rsi, 4), rdx);
315
316 //
317 // cpuid(0x7) Structured Extended Features Enumeration Leaf.
318 //
319 __ bind(sef_cpuid);
320 __ movl(rax, 7);
321 __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
322 __ jccb(Assembler::greater, ext_cpuid);
323 // ECX = 0
324 __ xorl(rcx, rcx);
325 __ cpuid();
326 __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
327 __ movl(Address(rsi, 0), rax);
328 __ movl(Address(rsi, 4), rbx);
329 __ movl(Address(rsi, 8), rcx);
330 __ movl(Address(rsi, 12), rdx);
331
332 //
333 // cpuid(0x7) Structured Extended Features Enumeration Sub-Leaf 1.
334 //
335 __ bind(sefsl1_cpuid);
336 __ movl(rax, 7);
337 __ movl(rcx, 1);
338 __ cpuid();
339 __ lea(rsi, Address(rbp, in_bytes(VM_Version::sefsl1_cpuid7_offset())));
340 __ movl(Address(rsi, 0), rax);
341 __ movl(Address(rsi, 4), rdx);
342
343 //
344 // cpuid(0x29) APX NCI NDD NF (EAX = 29H, ECX = 0).
345 //
346 __ bind(std_cpuid29);
347 __ movl(rax, 0x29);
348 __ movl(rcx, 0);
349 __ cpuid();
350 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid29_offset())));
351 __ movl(Address(rsi, 0), rbx);
352
353 //
354 // cpuid(0x24) Converged Vector ISA Main Leaf (EAX = 24H, ECX = 0).
355 //
356 __ bind(std_cpuid24);
357 __ movl(rax, 0x24);
358 __ movl(rcx, 0);
359 __ cpuid();
360 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid24_offset())));
361 __ movl(Address(rsi, 0), rax);
362 __ movl(Address(rsi, 4), rbx);
363
364 //
365 // Extended cpuid(0x80000000)
366 //
367 __ bind(ext_cpuid);
368 __ movl(rax, 0x80000000);
369 __ cpuid();
370 __ cmpl(rax, 0x80000000); // Is cpuid(0x80000001) supported?
371 __ jcc(Assembler::belowEqual, done);
372 __ cmpl(rax, 0x80000004); // Is cpuid(0x80000005) supported?
373 __ jcc(Assembler::belowEqual, ext_cpuid1);
374 __ cmpl(rax, 0x80000006); // Is cpuid(0x80000007) supported?
375 __ jccb(Assembler::belowEqual, ext_cpuid5);
376 __ cmpl(rax, 0x80000007); // Is cpuid(0x80000008) supported?
377 __ jccb(Assembler::belowEqual, ext_cpuid7);
378 __ cmpl(rax, 0x80000008); // Is cpuid(0x80000009 and above) supported?
379 __ jccb(Assembler::belowEqual, ext_cpuid8);
380 __ cmpl(rax, 0x8000001E); // Is cpuid(0x8000001E) supported?
381 __ jccb(Assembler::below, ext_cpuid8);
382 //
383 // Extended cpuid(0x8000001E)
384 //
385 __ movl(rax, 0x8000001E);
386 __ cpuid();
387 __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1E_offset())));
388 __ movl(Address(rsi, 0), rax);
389 __ movl(Address(rsi, 4), rbx);
390 __ movl(Address(rsi, 8), rcx);
391 __ movl(Address(rsi,12), rdx);
392
393 //
394 // Extended cpuid(0x80000008)
395 //
396 __ bind(ext_cpuid8);
397 __ movl(rax, 0x80000008);
398 __ cpuid();
399 __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
400 __ movl(Address(rsi, 0), rax);
401 __ movl(Address(rsi, 4), rbx);
402 __ movl(Address(rsi, 8), rcx);
403 __ movl(Address(rsi,12), rdx);
404
405 //
406 // Extended cpuid(0x80000007)
407 //
408 __ bind(ext_cpuid7);
409 __ movl(rax, 0x80000007);
410 __ cpuid();
411 __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
412 __ movl(Address(rsi, 0), rax);
413 __ movl(Address(rsi, 4), rbx);
414 __ movl(Address(rsi, 8), rcx);
415 __ movl(Address(rsi,12), rdx);
416
417 //
418 // Extended cpuid(0x80000005)
419 //
420 __ bind(ext_cpuid5);
421 __ movl(rax, 0x80000005);
422 __ cpuid();
423 __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
424 __ movl(Address(rsi, 0), rax);
425 __ movl(Address(rsi, 4), rbx);
426 __ movl(Address(rsi, 8), rcx);
427 __ movl(Address(rsi,12), rdx);
428
429 //
430 // Extended cpuid(0x80000001)
431 //
432 __ bind(ext_cpuid1);
433 __ movl(rax, 0x80000001);
434 __ cpuid();
435 __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
436 __ movl(Address(rsi, 0), rax);
437 __ movl(Address(rsi, 4), rbx);
438 __ movl(Address(rsi, 8), rcx);
439 __ movl(Address(rsi,12), rdx);
440
441 //
442 // Check if OS has enabled XGETBV instruction to access XCR0
443 // (OSXSAVE feature flag) and CPU supports APX
444 //
445 // To enable APX, check CPUID.EAX=7.ECX=1.EDX[21] bit for HW support
446 // and XCRO[19] bit for OS support to save/restore extended GPR state.
447 __ lea(rsi, Address(rbp, in_bytes(VM_Version::sefsl1_cpuid7_offset())));
448 __ movl(rax, 0x200000);
449 __ andl(rax, Address(rsi, 4));
450 __ jcc(Assembler::equal, vector_save_restore);
451 // check _cpuid_info.xem_xcr0_eax.bits.apx_f
452 __ movl(rax, 0x80000);
453 __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits apx_f
454 __ jcc(Assembler::equal, vector_save_restore);
455
456 bool save_apx = UseAPX;
457 VM_Version::set_apx_cpuFeatures();
458 UseAPX = true;
459 __ mov64(r16, VM_Version::egpr_test_value());
460 __ mov64(r31, VM_Version::egpr_test_value());
461 __ xorl(rsi, rsi);
462 VM_Version::set_cpuinfo_segv_addr_apx(__ pc());
463 // Generate SEGV
464 __ movl(rax, Address(rsi, 0));
465
466 VM_Version::set_cpuinfo_cont_addr_apx(__ pc());
467 __ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_save_offset())));
468 __ movq(Address(rsi, 0), r16);
469 __ movq(Address(rsi, 8), r31);
470
471 //
472 // Query CPUID 0xD.19 for APX XSAVE offset
473 // Extended State Enumeration Sub-leaf 19 (APX)
474 // EAX = size of APX state (should be 128)
475 // EBX = offset in standard XSAVE format
476 //
477 __ movl(rax, 0xD);
478 __ movl(rcx, 19);
479 __ cpuid();
480 __ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_size_offset())));
481 __ movl(Address(rsi, 0), rax);
482 __ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_offset_offset())));
483 __ movl(Address(rsi, 0), rbx);
484
485 UseAPX = save_apx;
486 __ bind(vector_save_restore);
487 //
488 // Check if OS has enabled XGETBV instruction to access XCR0
489 // (OSXSAVE feature flag) and CPU supports AVX
490 //
491 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
492 __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
493 __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
494 __ cmpl(rcx, 0x18000000);
495 __ jccb(Assembler::notEqual, done); // jump if AVX is not supported
496
497 __ movl(rax, 0x6);
498 __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
499 __ cmpl(rax, 0x6);
500 __ jccb(Assembler::equal, start_simd_check); // return if AVX is not supported
501
502 // we need to bridge farther than imm8, so we use this island as a thunk
503 __ bind(done);
504 __ jmp(wrapup);
505
506 __ bind(start_simd_check);
507 //
508 // Some OSs have a bug when upper 128/256bits of YMM/ZMM
509 // registers are not restored after a signal processing.
510 // Generate SEGV here (reference through null)
511 // and check upper YMM/ZMM bits after it.
512 //
513 int saved_useavx = UseAVX;
514 int saved_usesse = UseSSE;
515
516 // If UseAVX is uninitialized or is set by the user to include EVEX
517 if (use_evex) {
518 // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
519 // OR check _cpuid_info.sefsl1_cpuid7_edx.bits.avx10
520 __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
521 __ movl(rax, 0x10000);
522 __ andl(rax, Address(rsi, 4));
523 __ lea(rsi, Address(rbp, in_bytes(VM_Version::sefsl1_cpuid7_offset())));
524 __ movl(rbx, 0x80000);
525 __ andl(rbx, Address(rsi, 4));
526 __ orl(rax, rbx);
527 __ jccb(Assembler::equal, legacy_setup); // jump if EVEX is not supported
528 // check _cpuid_info.xem_xcr0_eax.bits.opmask
529 // check _cpuid_info.xem_xcr0_eax.bits.zmm512
530 // check _cpuid_info.xem_xcr0_eax.bits.zmm32
531 __ movl(rax, 0xE0);
532 __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
533 __ cmpl(rax, 0xE0);
534 __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
535
536 if (FLAG_IS_DEFAULT(UseAVX)) {
537 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
538 __ movl(rax, Address(rsi, 0));
539 __ cmpl(rax, 0x50654); // If it is Skylake
540 __ jcc(Assembler::equal, legacy_setup);
541 }
542 // EVEX setup: run in lowest evex mode
543 VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
544 UseAVX = 3;
545 UseSSE = 2;
546 #ifdef _WINDOWS
547 // xmm5-xmm15 are not preserved by caller on windows
548 // https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
549 __ subptr(rsp, 64);
550 __ evmovdqul(Address(rsp, 0), xmm7, Assembler::AVX_512bit);
551 __ subptr(rsp, 64);
552 __ evmovdqul(Address(rsp, 0), xmm8, Assembler::AVX_512bit);
553 __ subptr(rsp, 64);
554 __ evmovdqul(Address(rsp, 0), xmm31, Assembler::AVX_512bit);
555 #endif // _WINDOWS
556
557 // load value into all 64 bytes of zmm7 register
558 __ movl(rcx, VM_Version::ymm_test_value());
559 __ movdl(xmm0, rcx);
560 __ vpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
561 __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
562 __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
563 __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
564 VM_Version::clean_cpuFeatures();
565 __ jmp(save_restore_except);
566 }
567
568 __ bind(legacy_setup);
569 // AVX setup
570 VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
571 UseAVX = 1;
572 UseSSE = 2;
573 #ifdef _WINDOWS
574 __ subptr(rsp, 32);
575 __ vmovdqu(Address(rsp, 0), xmm7);
576 __ subptr(rsp, 32);
577 __ vmovdqu(Address(rsp, 0), xmm8);
578 __ subptr(rsp, 32);
579 __ vmovdqu(Address(rsp, 0), xmm15);
580 #endif // _WINDOWS
581
582 // load value into all 32 bytes of ymm7 register
583 __ movl(rcx, VM_Version::ymm_test_value());
584
585 __ movdl(xmm0, rcx);
586 __ pshufd(xmm0, xmm0, 0x00);
587 __ vinsertf128_high(xmm0, xmm0);
588 __ vmovdqu(xmm7, xmm0);
589 __ vmovdqu(xmm8, xmm0);
590 __ vmovdqu(xmm15, xmm0);
591 VM_Version::clean_cpuFeatures();
592
593 __ bind(save_restore_except);
594 __ xorl(rsi, rsi);
595 VM_Version::set_cpuinfo_segv_addr(__ pc());
596 // Generate SEGV
597 __ movl(rax, Address(rsi, 0));
598
599 VM_Version::set_cpuinfo_cont_addr(__ pc());
600 // Returns here after signal. Save xmm0 to check it later.
601
602 // If UseAVX is uninitialized or is set by the user to include EVEX
603 if (use_evex) {
604 // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
605 __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
606 __ movl(rax, 0x10000);
607 __ andl(rax, Address(rsi, 4));
608 __ jcc(Assembler::equal, legacy_save_restore);
609 // check _cpuid_info.xem_xcr0_eax.bits.opmask
610 // check _cpuid_info.xem_xcr0_eax.bits.zmm512
611 // check _cpuid_info.xem_xcr0_eax.bits.zmm32
612 __ movl(rax, 0xE0);
613 __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
614 __ cmpl(rax, 0xE0);
615 __ jcc(Assembler::notEqual, legacy_save_restore);
616
617 if (FLAG_IS_DEFAULT(UseAVX)) {
618 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
619 __ movl(rax, Address(rsi, 0));
620 __ cmpl(rax, 0x50654); // If it is Skylake
621 __ jcc(Assembler::equal, legacy_save_restore);
622 }
623 // EVEX check: run in lowest evex mode
624 VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
625 UseAVX = 3;
626 UseSSE = 2;
627 __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
628 __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
629 __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
630 __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
631 __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
632
633 #ifdef _WINDOWS
634 __ evmovdqul(xmm31, Address(rsp, 0), Assembler::AVX_512bit);
635 __ addptr(rsp, 64);
636 __ evmovdqul(xmm8, Address(rsp, 0), Assembler::AVX_512bit);
637 __ addptr(rsp, 64);
638 __ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
639 __ addptr(rsp, 64);
640 #endif // _WINDOWS
641 generate_vzeroupper(wrapup);
642 VM_Version::clean_cpuFeatures();
643 UseAVX = saved_useavx;
644 UseSSE = saved_usesse;
645 __ jmp(wrapup);
646 }
647
648 __ bind(legacy_save_restore);
649 // AVX check
650 VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
651 UseAVX = 1;
652 UseSSE = 2;
653 __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
654 __ vmovdqu(Address(rsi, 0), xmm0);
655 __ vmovdqu(Address(rsi, 32), xmm7);
656 __ vmovdqu(Address(rsi, 64), xmm8);
657 __ vmovdqu(Address(rsi, 96), xmm15);
658
659 #ifdef _WINDOWS
660 __ vmovdqu(xmm15, Address(rsp, 0));
661 __ addptr(rsp, 32);
662 __ vmovdqu(xmm8, Address(rsp, 0));
663 __ addptr(rsp, 32);
664 __ vmovdqu(xmm7, Address(rsp, 0));
665 __ addptr(rsp, 32);
666 #endif // _WINDOWS
667
668 generate_vzeroupper(wrapup);
669 VM_Version::clean_cpuFeatures();
670 UseAVX = saved_useavx;
671 UseSSE = saved_usesse;
672
673 __ bind(wrapup);
674 __ popf();
675 __ pop(rsi);
676 __ pop(rbx);
677 __ pop(rbp);
678 __ ret(0);
679
680 # undef __
681
682 return start;
683 };
684 void generate_vzeroupper(Label& L_wrapup) {
685 # define __ _masm->
686 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
687 __ cmpl(Address(rsi, 4), 0x756e6547); // 'uneG'
688 __ jcc(Assembler::notEqual, L_wrapup);
689 __ movl(rcx, 0x0FFF0FF0);
690 __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
691 __ andl(rcx, Address(rsi, 0));
692 __ cmpl(rcx, 0x00050670); // If it is Xeon Phi 3200/5200/7200
693 __ jcc(Assembler::equal, L_wrapup);
694 __ cmpl(rcx, 0x00080650); // If it is Future Xeon Phi
695 __ jcc(Assembler::equal, L_wrapup);
696 // vzeroupper() will use a pre-computed instruction sequence that we
697 // can't compute until after we've determined CPU capabilities. Use
698 // uncached variant here directly to be able to bootstrap correctly
699 __ vzeroupper_uncached();
700 # undef __
701 }
702 address generate_detect_virt() {
703 StubCodeMark mark(this, "VM_Version", "detect_virt_stub");
704 # define __ _masm->
705
706 address start = __ pc();
707
708 // Evacuate callee-saved registers
709 __ push(rbp);
710 __ push(rbx);
711 __ push(rsi); // for Windows
712
713 __ mov(rax, c_rarg0); // CPUID leaf
714 __ mov(rsi, c_rarg1); // register array address (eax, ebx, ecx, edx)
715
716 __ cpuid();
717
718 // Store result to register array
719 __ movl(Address(rsi, 0), rax);
720 __ movl(Address(rsi, 4), rbx);
721 __ movl(Address(rsi, 8), rcx);
722 __ movl(Address(rsi, 12), rdx);
723
724 // Epilogue
725 __ pop(rsi);
726 __ pop(rbx);
727 __ pop(rbp);
728 __ ret(0);
729
730 # undef __
731
732 return start;
733 };
734
735
736 address generate_getCPUIDBrandString(void) {
737 // Flags to test CPU type.
738 const uint32_t HS_EFL_AC = 0x40000;
739 const uint32_t HS_EFL_ID = 0x200000;
740 // Values for when we don't have a CPUID instruction.
741 const int CPU_FAMILY_SHIFT = 8;
742 const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
743 const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
744
745 Label detect_486, cpu486, detect_586, done, ext_cpuid;
746
747 StubCodeMark mark(this, "VM_Version", "getCPUIDNameInfo_stub");
748 # define __ _masm->
749
750 address start = __ pc();
751
752 //
753 // void getCPUIDBrandString(VM_Version::CpuidInfo* cpuid_info);
754 //
755 // rcx and rdx are first and second argument registers on windows
756
757 __ push(rbp);
758 __ mov(rbp, c_rarg0); // cpuid_info address
759 __ push(rbx);
760 __ push(rsi);
761 __ pushf(); // preserve rbx, and flags
762 __ pop(rax);
763 __ push(rax);
764 __ mov(rcx, rax);
765 //
766 // if we are unable to change the AC flag, we have a 386
767 //
768 __ xorl(rax, HS_EFL_AC);
769 __ push(rax);
770 __ popf();
771 __ pushf();
772 __ pop(rax);
773 __ cmpptr(rax, rcx);
774 __ jccb(Assembler::notEqual, detect_486);
775
776 __ movl(rax, CPU_FAMILY_386);
777 __ jmp(done);
778
779 //
780 // If we are unable to change the ID flag, we have a 486 which does
781 // not support the "cpuid" instruction.
782 //
783 __ bind(detect_486);
784 __ mov(rax, rcx);
785 __ xorl(rax, HS_EFL_ID);
786 __ push(rax);
787 __ popf();
788 __ pushf();
789 __ pop(rax);
790 __ cmpptr(rcx, rax);
791 __ jccb(Assembler::notEqual, detect_586);
792
793 __ bind(cpu486);
794 __ movl(rax, CPU_FAMILY_486);
795 __ jmp(done);
796
797 //
798 // At this point, we have a chip which supports the "cpuid" instruction
799 //
800 __ bind(detect_586);
801 __ xorl(rax, rax);
802 __ cpuid();
803 __ orl(rax, rax);
804 __ jcc(Assembler::equal, cpu486); // if cpuid doesn't support an input
805 // value of at least 1, we give up and
806 // assume a 486
807
808 //
809 // Extended cpuid(0x80000000) for processor brand string detection
810 //
811 __ bind(ext_cpuid);
812 __ movl(rax, CPUID_EXTENDED_FN);
813 __ cpuid();
814 __ cmpl(rax, CPUID_EXTENDED_FN_4);
815 __ jcc(Assembler::below, done);
816
817 //
818 // Extended cpuid(0x80000002) // first 16 bytes in brand string
819 //
820 __ movl(rax, CPUID_EXTENDED_FN_2);
821 __ cpuid();
822 __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_0_offset())));
823 __ movl(Address(rsi, 0), rax);
824 __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_1_offset())));
825 __ movl(Address(rsi, 0), rbx);
826 __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_2_offset())));
827 __ movl(Address(rsi, 0), rcx);
828 __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_3_offset())));
829 __ movl(Address(rsi,0), rdx);
830
831 //
832 // Extended cpuid(0x80000003) // next 16 bytes in brand string
833 //
834 __ movl(rax, CPUID_EXTENDED_FN_3);
835 __ cpuid();
836 __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_4_offset())));
837 __ movl(Address(rsi, 0), rax);
838 __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_5_offset())));
839 __ movl(Address(rsi, 0), rbx);
840 __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_6_offset())));
841 __ movl(Address(rsi, 0), rcx);
842 __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_7_offset())));
843 __ movl(Address(rsi,0), rdx);
844
845 //
846 // Extended cpuid(0x80000004) // last 16 bytes in brand string
847 //
848 __ movl(rax, CPUID_EXTENDED_FN_4);
849 __ cpuid();
850 __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_8_offset())));
851 __ movl(Address(rsi, 0), rax);
852 __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_9_offset())));
853 __ movl(Address(rsi, 0), rbx);
854 __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_10_offset())));
855 __ movl(Address(rsi, 0), rcx);
856 __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_11_offset())));
857 __ movl(Address(rsi,0), rdx);
858
859 //
860 // return
861 //
862 __ bind(done);
863 __ popf();
864 __ pop(rsi);
865 __ pop(rbx);
866 __ pop(rbp);
867 __ ret(0);
868
869 # undef __
870
871 return start;
872 };
873 };
874
875 void VM_Version::get_processor_features() {
876
877 _cpu = 4; // 486 by default
878 _model = 0;
879 _stepping = 0;
880 _logical_processors_per_package = 1;
881 // i486 internal cache is both I&D and has a 16-byte line size
882 _L1_data_cache_line_size = 16;
883
884 // Get raw processor info
885
886 get_cpu_info_stub(&_cpuid_info);
887
888 assert_is_initialized();
889 _cpu = extended_cpu_family();
890 _model = extended_cpu_model();
891 _stepping = cpu_stepping();
892
893 if (cpu_family() > 4) { // it supports CPUID
894 _features = _cpuid_info.feature_flags(); // These can be changed by VM settings
895 _cpu_features = _features; // Preserve features
896 // Logical processors are only available on P4s and above,
897 // and only if hyperthreading is available.
898 _logical_processors_per_package = logical_processor_count();
899 _L1_data_cache_line_size = L1_line_size();
900 }
901
902 // xchg and xadd instructions
903 _supports_atomic_getset4 = true;
904 _supports_atomic_getadd4 = true;
905 _supports_atomic_getset8 = true;
906 _supports_atomic_getadd8 = true;
907
908 // OS should support SSE for x64 and hardware should support at least SSE2.
909 if (!VM_Version::supports_sse2()) {
910 vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
911 }
912 // in 64 bit the use of SSE2 is the minimum
913 if (UseSSE < 2) UseSSE = 2;
914
915 // flush_icache_stub have to be generated first.
916 // That is why Icache line size is hard coded in ICache class,
917 // see icache_x86.hpp. It is also the reason why we can't use
918 // clflush instruction in 32-bit VM since it could be running
919 // on CPU which does not support it.
920 //
921 // The only thing we can do is to verify that flushed
922 // ICache::line_size has correct value.
923 guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
924 // clflush_size is size in quadwords (8 bytes).
925 guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
926
927 // assigning this field effectively enables Unsafe.writebackMemory()
928 // by initing UnsafeConstant.DATA_CACHE_LINE_FLUSH_SIZE to non-zero
929 // that is only implemented on x86_64 and only if the OS plays ball
930 if (os::supports_map_sync()) {
931 // publish data cache line flush size to generic field, otherwise
932 // let if default to zero thereby disabling writeback
933 _data_cache_line_flush_size = _cpuid_info.std_cpuid1_ebx.bits.clflush_size * 8;
934 }
935
936 // Check if processor has Intel Ecore
937 if (FLAG_IS_DEFAULT(EnableX86ECoreOpts) && is_intel() && is_intel_server_family() &&
938 (supports_hybrid() ||
939 _model == 0xAF /* Xeon 6 E-cores (Sierra Forest) */ ||
940 _model == 0xDD /* Xeon 6+ E-cores (Clearwater Forest) */ )) {
941 FLAG_SET_DEFAULT(EnableX86ECoreOpts, true);
942 }
943
944 if (UseSSE < 4) {
945 _features.clear_feature(CPU_SSE4_1);
946 _features.clear_feature(CPU_SSE4_2);
947 }
948
949 if (UseSSE < 3) {
950 _features.clear_feature(CPU_SSE3);
951 _features.clear_feature(CPU_SSSE3);
952 _features.clear_feature(CPU_SSE4A);
953 }
954
955 if (UseSSE < 2)
956 _features.clear_feature(CPU_SSE2);
957
958 if (UseSSE < 1)
959 _features.clear_feature(CPU_SSE);
960
961 // ZX cpus specific settings
962 if (is_zx() && FLAG_IS_DEFAULT(UseAVX)) {
963 if (cpu_family() == 7) {
964 if (extended_cpu_model() == 0x5B || extended_cpu_model() == 0x6B) {
965 UseAVX = 1;
966 } else if (extended_cpu_model() == 0x1B || extended_cpu_model() == 0x3B) {
967 UseAVX = 0;
968 }
969 } else if (cpu_family() == 6) {
970 UseAVX = 0;
971 }
972 }
973
974 // UseSSE is set to the smaller of what hardware supports and what
975 // the command line requires. I.e., you cannot set UseSSE to 2 on
976 // older Pentiums which do not support it.
977 int use_sse_limit = 0;
978 if (UseSSE > 0) {
979 if (UseSSE > 3 && supports_sse4_1()) {
980 use_sse_limit = 4;
981 } else if (UseSSE > 2 && supports_sse3()) {
982 use_sse_limit = 3;
983 } else if (UseSSE > 1 && supports_sse2()) {
984 use_sse_limit = 2;
985 } else if (UseSSE > 0 && supports_sse()) {
986 use_sse_limit = 1;
987 } else {
988 use_sse_limit = 0;
989 }
990 }
991 if (FLAG_IS_DEFAULT(UseSSE)) {
992 FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
993 } else if (UseSSE > use_sse_limit) {
994 warning("UseSSE=%d is not supported on this CPU, setting it to UseSSE=%d", UseSSE, use_sse_limit);
995 FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
996 }
997
998 // first try initial setting and detect what we can support
999 int use_avx_limit = 0;
1000 if (UseAVX > 0) {
1001 if (UseSSE < 4) {
1002 // Don't use AVX if SSE is unavailable or has been disabled.
1003 use_avx_limit = 0;
1004 } else if (UseAVX > 2 && supports_evex()) {
1005 use_avx_limit = 3;
1006 } else if (UseAVX > 1 && supports_avx2()) {
1007 use_avx_limit = 2;
1008 } else if (UseAVX > 0 && supports_avx()) {
1009 use_avx_limit = 1;
1010 } else {
1011 use_avx_limit = 0;
1012 }
1013 }
1014 if (FLAG_IS_DEFAULT(UseAVX)) {
1015 // Don't use AVX-512 on older Skylakes unless explicitly requested.
1016 if (use_avx_limit > 2 && is_intel_skylake() && _stepping < 5) {
1017 FLAG_SET_DEFAULT(UseAVX, 2);
1018 } else {
1019 FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
1020 }
1021 }
1022
1023 if (UseAVX > use_avx_limit) {
1024 if (UseSSE < 4) {
1025 warning("UseAVX=%d requires UseSSE=4, setting it to UseAVX=0", UseAVX);
1026 } else {
1027 warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", UseAVX, use_avx_limit);
1028 }
1029 FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
1030 }
1031
1032 if (UseAVX < 3) {
1033 _features.clear_feature(CPU_AVX512F);
1034 _features.clear_feature(CPU_AVX512DQ);
1035 _features.clear_feature(CPU_AVX512CD);
1036 _features.clear_feature(CPU_AVX512BW);
1037 _features.clear_feature(CPU_AVX512ER);
1038 _features.clear_feature(CPU_AVX512PF);
1039 _features.clear_feature(CPU_AVX512VL);
1040 _features.clear_feature(CPU_AVX512_VPOPCNTDQ);
1041 _features.clear_feature(CPU_AVX512_VPCLMULQDQ);
1042 _features.clear_feature(CPU_AVX512_VAES);
1043 _features.clear_feature(CPU_AVX512_VNNI);
1044 _features.clear_feature(CPU_AVX512_VBMI);
1045 _features.clear_feature(CPU_AVX512_VBMI2);
1046 _features.clear_feature(CPU_AVX512_BITALG);
1047 _features.clear_feature(CPU_AVX512_IFMA);
1048 _features.clear_feature(CPU_APX_F);
1049 _features.clear_feature(CPU_AVX512_FP16);
1050 _features.clear_feature(CPU_AVX10_1);
1051 _features.clear_feature(CPU_AVX10_2);
1052 }
1053
1054
1055 if (UseAVX < 2) {
1056 _features.clear_feature(CPU_AVX2);
1057 _features.clear_feature(CPU_AVX_IFMA);
1058 }
1059
1060 if (UseAVX < 1) {
1061 _features.clear_feature(CPU_AVX);
1062 _features.clear_feature(CPU_VZEROUPPER);
1063 _features.clear_feature(CPU_F16C);
1064 _features.clear_feature(CPU_SHA512);
1065 }
1066
1067 if (logical_processors_per_package() == 1) {
1068 // HT processor could be installed on a system which doesn't support HT.
1069 _features.clear_feature(CPU_HT);
1070 }
1071
1072 if (is_intel()) { // Intel cpus specific settings
1073 if (is_knights_family()) {
1074 _features.clear_feature(CPU_VZEROUPPER);
1075 _features.clear_feature(CPU_AVX512BW);
1076 _features.clear_feature(CPU_AVX512VL);
1077 _features.clear_feature(CPU_APX_F);
1078 _features.clear_feature(CPU_AVX512DQ);
1079 _features.clear_feature(CPU_AVX512_VNNI);
1080 _features.clear_feature(CPU_AVX512_VAES);
1081 _features.clear_feature(CPU_AVX512_VPOPCNTDQ);
1082 _features.clear_feature(CPU_AVX512_VPCLMULQDQ);
1083 _features.clear_feature(CPU_AVX512_VBMI);
1084 _features.clear_feature(CPU_AVX512_VBMI2);
1085 _features.clear_feature(CPU_CLWB);
1086 _features.clear_feature(CPU_FLUSHOPT);
1087 _features.clear_feature(CPU_GFNI);
1088 _features.clear_feature(CPU_AVX512_BITALG);
1089 _features.clear_feature(CPU_AVX512_IFMA);
1090 _features.clear_feature(CPU_AVX_IFMA);
1091 _features.clear_feature(CPU_AVX512_FP16);
1092 _features.clear_feature(CPU_AVX10_1);
1093 _features.clear_feature(CPU_AVX10_2);
1094 }
1095 }
1096
1097 // Currently APX support is only enabled for targets supporting AVX512VL feature.
1098 bool apx_supported = os_supports_apx_egprs() && supports_apx_f() && supports_avx512vl();
1099 if (UseAPX && !apx_supported) {
1100 warning("UseAPX is not supported on this CPU, setting it to false");
1101 FLAG_SET_DEFAULT(UseAPX, false);
1102 }
1103
1104 if (!UseAPX) {
1105 _features.clear_feature(CPU_APX_F);
1106 }
1107
1108 if (FLAG_IS_DEFAULT(IntelJccErratumMitigation)) {
1109 _has_intel_jcc_erratum = compute_has_intel_jcc_erratum();
1110 FLAG_SET_ERGO(IntelJccErratumMitigation, _has_intel_jcc_erratum);
1111 } else {
1112 _has_intel_jcc_erratum = IntelJccErratumMitigation;
1113 }
1114
1115 assert(supports_clflush(), "Always present");
1116 if (X86ICacheSync == -1) {
1117 // Auto-detect, choosing the best performant one that still flushes
1118 // the cache. We could switch to CPUID/SERIALIZE ("4"/"5") going forward.
1119 if (supports_clwb()) {
1120 FLAG_SET_ERGO(X86ICacheSync, 3);
1121 } else if (supports_clflushopt()) {
1122 FLAG_SET_ERGO(X86ICacheSync, 2);
1123 } else {
1124 FLAG_SET_ERGO(X86ICacheSync, 1);
1125 }
1126 } else {
1127 if ((X86ICacheSync == 2) && !supports_clflushopt()) {
1128 vm_exit_during_initialization("CPU does not support CLFLUSHOPT, unable to use X86ICacheSync=2");
1129 }
1130 if ((X86ICacheSync == 3) && !supports_clwb()) {
1131 vm_exit_during_initialization("CPU does not support CLWB, unable to use X86ICacheSync=3");
1132 }
1133 if ((X86ICacheSync == 5) && !supports_serialize()) {
1134 vm_exit_during_initialization("CPU does not support SERIALIZE, unable to use X86ICacheSync=5");
1135 }
1136 }
1137
1138 stringStream ss(2048);
1139 if (supports_hybrid()) {
1140 ss.print("(hybrid)");
1141 } else {
1142 ss.print("(%u cores per cpu, %u threads per core)", cores_per_cpu(), threads_per_core());
1143 }
1144 ss.print(" family %d model %d stepping %d microcode 0x%x",
1145 cpu_family(), _model, _stepping, os::cpu_microcode_revision());
1146 ss.print(", ");
1147 int features_offset = (int)ss.size();
1148 insert_features_names(_features, ss);
1149
1150 _cpu_info_string = ss.as_string(true);
1151 _features_string = _cpu_info_string + features_offset;
1152
1153 // Use AES instructions if available.
1154 if (supports_aes()) {
1155 if (FLAG_IS_DEFAULT(UseAES)) {
1156 FLAG_SET_DEFAULT(UseAES, true);
1157 }
1158 if (!UseAES) {
1159 if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
1160 warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
1161 }
1162 FLAG_SET_DEFAULT(UseAESIntrinsics, false);
1163 if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
1164 warning("AES_CTR intrinsics require UseAES flag to be enabled. AES_CTR intrinsics will be disabled.");
1165 }
1166 FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
1167 } else {
1168 if (UseSSE > 2) {
1169 if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
1170 FLAG_SET_DEFAULT(UseAESIntrinsics, true);
1171 }
1172 } else {
1173 // The AES intrinsic stubs require AES instruction support (of course)
1174 // but also require sse3 mode or higher for instructions it use.
1175 if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
1176 warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
1177 }
1178 FLAG_SET_DEFAULT(UseAESIntrinsics, false);
1179 }
1180
1181 // --AES-CTR begins--
1182 if (!UseAESIntrinsics) {
1183 if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
1184 warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
1185 }
1186 FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
1187 } else {
1188 if (supports_sse4_1()) {
1189 if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
1190 FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
1191 }
1192 } else {
1193 // The AES-CTR intrinsic stubs require AES instruction support (of course)
1194 // but also require sse4.1 mode or higher for instructions it use.
1195 if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
1196 warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled.");
1197 }
1198 FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
1199 }
1200 }
1201 // --AES-CTR ends--
1202 }
1203 } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
1204 if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
1205 warning("AES instructions are not available on this CPU");
1206 }
1207 FLAG_SET_DEFAULT(UseAES, false);
1208 if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
1209 warning("AES intrinsics are not available on this CPU");
1210 }
1211 FLAG_SET_DEFAULT(UseAESIntrinsics, false);
1212 if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
1213 warning("AES-CTR intrinsics are not available on this CPU");
1214 }
1215 FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
1216 }
1217
1218 // Use CLMUL instructions if available.
1219 if (supports_clmul()) {
1220 if (FLAG_IS_DEFAULT(UseCLMUL)) {
1221 UseCLMUL = true;
1222 }
1223 } else if (UseCLMUL) {
1224 if (!FLAG_IS_DEFAULT(UseCLMUL))
1225 warning("CLMUL instructions not available on this CPU (AVX may also be required)");
1226 FLAG_SET_DEFAULT(UseCLMUL, false);
1227 }
1228
1229 if (UseCLMUL && (UseSSE > 2)) {
1230 if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
1231 UseCRC32Intrinsics = true;
1232 }
1233 } else if (UseCRC32Intrinsics) {
1234 if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
1235 warning("CRC32 Intrinsics requires CLMUL instructions (not available on this CPU)");
1236 FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
1237 }
1238
1239 if (supports_avx2()) {
1240 if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
1241 UseAdler32Intrinsics = true;
1242 }
1243 } else if (UseAdler32Intrinsics) {
1244 if (!FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
1245 warning("Adler32 Intrinsics requires avx2 instructions (not available on this CPU)");
1246 }
1247 FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
1248 }
1249
1250 if (supports_sse4_2() && supports_clmul()) {
1251 if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
1252 UseCRC32CIntrinsics = true;
1253 }
1254 } else if (UseCRC32CIntrinsics) {
1255 if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
1256 warning("CRC32C intrinsics are not available on this CPU");
1257 }
1258 FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
1259 }
1260
1261 // GHASH/GCM intrinsics
1262 if (UseCLMUL && (UseSSE > 2)) {
1263 if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
1264 UseGHASHIntrinsics = true;
1265 }
1266 } else if (UseGHASHIntrinsics) {
1267 if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
1268 warning("GHASH intrinsic requires CLMUL and SSE2 instructions on this CPU");
1269 FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
1270 }
1271
1272 // ChaCha20 Intrinsics
1273 // As long as the system supports AVX as a baseline we can do a
1274 // SIMD-enabled block function. StubGenerator makes the determination
1275 // based on the VM capabilities whether to use an AVX2 or AVX512-enabled
1276 // version.
1277 if (UseAVX >= 1) {
1278 if (FLAG_IS_DEFAULT(UseChaCha20Intrinsics)) {
1279 UseChaCha20Intrinsics = true;
1280 }
1281 } else if (UseChaCha20Intrinsics) {
1282 if (!FLAG_IS_DEFAULT(UseChaCha20Intrinsics)) {
1283 warning("ChaCha20 intrinsic requires AVX instructions");
1284 }
1285 FLAG_SET_DEFAULT(UseChaCha20Intrinsics, false);
1286 }
1287
1288 // Kyber Intrinsics
1289 // Currently we only have them for AVX512
1290 if (supports_evex() && supports_avx512bw()) {
1291 if (FLAG_IS_DEFAULT(UseKyberIntrinsics)) {
1292 UseKyberIntrinsics = true;
1293 }
1294 } else
1295 if (UseKyberIntrinsics) {
1296 warning("Intrinsics for ML-KEM are not available on this CPU.");
1297 FLAG_SET_DEFAULT(UseKyberIntrinsics, false);
1298 }
1299
1300 // Dilithium Intrinsics
1301 if (UseAVX > 1) {
1302 if (FLAG_IS_DEFAULT(UseDilithiumIntrinsics)) {
1303 UseDilithiumIntrinsics = true;
1304 }
1305 } else if (UseDilithiumIntrinsics) {
1306 warning("Intrinsics for ML-DSA are not available on this CPU.");
1307 FLAG_SET_DEFAULT(UseDilithiumIntrinsics, false);
1308 }
1309
1310 // Base64 Intrinsics (Check the condition for which the intrinsic will be active)
1311 if (UseAVX >= 2) {
1312 if (FLAG_IS_DEFAULT(UseBASE64Intrinsics)) {
1313 UseBASE64Intrinsics = true;
1314 }
1315 } else if (UseBASE64Intrinsics) {
1316 if (!FLAG_IS_DEFAULT(UseBASE64Intrinsics))
1317 warning("Base64 intrinsic requires EVEX instructions on this CPU");
1318 FLAG_SET_DEFAULT(UseBASE64Intrinsics, false);
1319 }
1320
1321 if (supports_fma()) {
1322 if (FLAG_IS_DEFAULT(UseFMA)) {
1323 UseFMA = true;
1324 }
1325 } else if (UseFMA) {
1326 warning("FMA instructions are not available on this CPU");
1327 FLAG_SET_DEFAULT(UseFMA, false);
1328 }
1329
1330 if (FLAG_IS_DEFAULT(UseMD5Intrinsics)) {
1331 UseMD5Intrinsics = true;
1332 }
1333
1334 if (supports_sha() || (supports_avx2() && supports_bmi2())) {
1335 if (FLAG_IS_DEFAULT(UseSHA)) {
1336 UseSHA = true;
1337 }
1338 } else if (UseSHA) {
1339 warning("SHA instructions are not available on this CPU");
1340 FLAG_SET_DEFAULT(UseSHA, false);
1341 }
1342
1343 if (supports_sha() && supports_sse4_1() && UseSHA) {
1344 if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
1345 FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
1346 }
1347 } else if (UseSHA1Intrinsics) {
1348 warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
1349 FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
1350 }
1351
1352 if (supports_sse4_1() && UseSHA) {
1353 if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
1354 FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
1355 }
1356 } else if (UseSHA256Intrinsics) {
1357 warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
1358 FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
1359 }
1360
1361 if (UseSHA && supports_avx2() && (supports_bmi2() || supports_sha512())) {
1362 if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
1363 FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
1364 }
1365 } else if (UseSHA512Intrinsics) {
1366 warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
1367 FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
1368 }
1369
1370 if (UseSHA && supports_evex() && supports_avx512bw()) {
1371 if (FLAG_IS_DEFAULT(UseSHA3Intrinsics)) {
1372 FLAG_SET_DEFAULT(UseSHA3Intrinsics, true);
1373 }
1374 } else if (UseSHA3Intrinsics) {
1375 warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU.");
1376 FLAG_SET_DEFAULT(UseSHA3Intrinsics, false);
1377 }
1378
1379 if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics || UseSHA3Intrinsics)) {
1380 FLAG_SET_DEFAULT(UseSHA, false);
1381 }
1382
1383 #if COMPILER2_OR_JVMCI
1384 int max_vector_size = 0;
1385 if (UseAVX == 0 || !os_supports_avx_vectors()) {
1386 // 16 byte vectors (in XMM) are supported with SSE2+
1387 max_vector_size = 16;
1388 } else if (UseAVX == 1 || UseAVX == 2) {
1389 // 32 bytes vectors (in YMM) are only supported with AVX+
1390 max_vector_size = 32;
1391 } else if (UseAVX > 2) {
1392 // 64 bytes vectors (in ZMM) are only supported with AVX 3
1393 max_vector_size = 64;
1394 }
1395
1396 int min_vector_size = 4; // We require MaxVectorSize to be at least 4 on 64bit
1397
1398 if (!FLAG_IS_DEFAULT(MaxVectorSize)) {
1399 if (MaxVectorSize < min_vector_size) {
1400 warning("MaxVectorSize must be at least %i on this platform", min_vector_size);
1401 FLAG_SET_DEFAULT(MaxVectorSize, min_vector_size);
1402 }
1403 if (MaxVectorSize > max_vector_size) {
1404 warning("MaxVectorSize must be at most %i on this platform", max_vector_size);
1405 FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
1406 }
1407 if (!is_power_of_2(MaxVectorSize)) {
1408 warning("MaxVectorSize must be a power of 2, setting to default: %i", max_vector_size);
1409 FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
1410 }
1411 } else {
1412 // If default, use highest supported configuration
1413 FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
1414 }
1415
1416 #if defined(COMPILER2) && defined(ASSERT)
1417 if (MaxVectorSize > 0) {
1418 if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
1419 tty->print_cr("State of YMM registers after signal handle:");
1420 int nreg = 4;
1421 const char* ymm_name[4] = {"0", "7", "8", "15"};
1422 for (int i = 0; i < nreg; i++) {
1423 tty->print("YMM%s:", ymm_name[i]);
1424 for (int j = 7; j >=0; j--) {
1425 tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
1426 }
1427 tty->cr();
1428 }
1429 }
1430 }
1431 #endif // COMPILER2 && ASSERT
1432
1433 if ((supports_avx512ifma() && supports_avx512vlbw()) || supports_avxifma()) {
1434 if (FLAG_IS_DEFAULT(UsePoly1305Intrinsics)) {
1435 FLAG_SET_DEFAULT(UsePoly1305Intrinsics, true);
1436 }
1437 } else if (UsePoly1305Intrinsics) {
1438 warning("Intrinsics for Poly1305 crypto hash functions not available on this CPU.");
1439 FLAG_SET_DEFAULT(UsePoly1305Intrinsics, false);
1440 }
1441
1442 if ((supports_avx512ifma() && supports_avx512vlbw()) || supports_avxifma()) {
1443 if (FLAG_IS_DEFAULT(UseIntPolyIntrinsics)) {
1444 FLAG_SET_DEFAULT(UseIntPolyIntrinsics, true);
1445 }
1446 } else if (UseIntPolyIntrinsics) {
1447 warning("Intrinsics for Polynomial crypto functions not available on this CPU.");
1448 FLAG_SET_DEFAULT(UseIntPolyIntrinsics, false);
1449 }
1450
1451 if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
1452 UseMultiplyToLenIntrinsic = true;
1453 }
1454 if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
1455 UseSquareToLenIntrinsic = true;
1456 }
1457 if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
1458 UseMulAddIntrinsic = true;
1459 }
1460 if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
1461 UseMontgomeryMultiplyIntrinsic = true;
1462 }
1463 if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
1464 UseMontgomerySquareIntrinsic = true;
1465 }
1466 #endif // COMPILER2_OR_JVMCI
1467
1468 // On new cpus instructions which update whole XMM register should be used
1469 // to prevent partial register stall due to dependencies on high half.
1470 //
1471 // UseXmmLoadAndClearUpper == true --> movsd(xmm, mem)
1472 // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
1473 // UseXmmRegToRegMoveAll == true --> movaps(xmm, xmm), movapd(xmm, xmm).
1474 // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm), movsd(xmm, xmm).
1475
1476
1477 if (is_zx()) { // ZX cpus specific settings
1478 if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
1479 UseStoreImmI16 = false; // don't use it on ZX cpus
1480 }
1481 if ((cpu_family() == 6) || (cpu_family() == 7)) {
1482 if (FLAG_IS_DEFAULT(UseAddressNop)) {
1483 // Use it on all ZX cpus
1484 UseAddressNop = true;
1485 }
1486 }
1487 if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
1488 UseXmmLoadAndClearUpper = true; // use movsd on all ZX cpus
1489 }
1490 if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
1491 if (supports_sse3()) {
1492 UseXmmRegToRegMoveAll = true; // use movaps, movapd on new ZX cpus
1493 } else {
1494 UseXmmRegToRegMoveAll = false;
1495 }
1496 }
1497 if (((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse3()) { // new ZX cpus
1498 #ifdef COMPILER2
1499 if (FLAG_IS_DEFAULT(MaxLoopPad)) {
1500 // For new ZX cpus do the next optimization:
1501 // don't align the beginning of a loop if there are enough instructions
1502 // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
1503 // in current fetch line (OptoLoopAlignment) or the padding
1504 // is big (> MaxLoopPad).
1505 // Set MaxLoopPad to 11 for new ZX cpus to reduce number of
1506 // generated NOP instructions. 11 is the largest size of one
1507 // address NOP instruction '0F 1F' (see Assembler::nop(i)).
1508 MaxLoopPad = 11;
1509 }
1510 #endif // COMPILER2
1511 if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1512 UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus
1513 }
1514 if (supports_sse4_2()) { // new ZX cpus
1515 if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1516 UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus
1517 }
1518 }
1519 }
1520
1521 if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
1522 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1523 }
1524 }
1525
1526 if (is_amd_family()) { // AMD cpus specific settings
1527 if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
1528 // Use it on new AMD cpus starting from Opteron.
1529 UseAddressNop = true;
1530 }
1531 if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
1532 // Use it on new AMD cpus starting from Opteron.
1533 UseNewLongLShift = true;
1534 }
1535 if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
1536 if (supports_sse4a()) {
1537 UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
1538 } else {
1539 UseXmmLoadAndClearUpper = false;
1540 }
1541 }
1542 if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
1543 if (supports_sse4a()) {
1544 UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
1545 } else {
1546 UseXmmRegToRegMoveAll = false;
1547 }
1548 }
1549 if (FLAG_IS_DEFAULT(UseXmmI2F)) {
1550 if (supports_sse4a()) {
1551 UseXmmI2F = true;
1552 } else {
1553 UseXmmI2F = false;
1554 }
1555 }
1556 if (FLAG_IS_DEFAULT(UseXmmI2D)) {
1557 if (supports_sse4a()) {
1558 UseXmmI2D = true;
1559 } else {
1560 UseXmmI2D = false;
1561 }
1562 }
1563
1564 // some defaults for AMD family 15h
1565 if (cpu_family() == 0x15) {
1566 // On family 15h processors default is no sw prefetch
1567 if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
1568 FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
1569 }
1570 // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
1571 if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
1572 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1573 }
1574 // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
1575 if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1576 FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
1577 }
1578 if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1579 FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
1580 }
1581 }
1582
1583 #ifdef COMPILER2
1584 if (cpu_family() < 0x17 && MaxVectorSize > 16) {
1585 // Limit vectors size to 16 bytes on AMD cpus < 17h.
1586 FLAG_SET_DEFAULT(MaxVectorSize, 16);
1587 }
1588 #endif // COMPILER2
1589
1590 // Some defaults for AMD family >= 17h && Hygon family 18h
1591 if (cpu_family() >= 0x17) {
1592 // On family >=17h processors use XMM and UnalignedLoadStores
1593 // for Array Copy
1594 if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1595 FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
1596 }
1597 if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1598 FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
1599 }
1600 #ifdef COMPILER2
1601 if (supports_sse4_2() && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
1602 FLAG_SET_DEFAULT(UseFPUForSpilling, true);
1603 }
1604 #endif
1605 }
1606 }
1607
1608 if (is_intel()) { // Intel cpus specific settings
1609 if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
1610 UseStoreImmI16 = false; // don't use it on Intel cpus
1611 }
1612 if (is_intel_server_family() || cpu_family() == 15) {
1613 if (FLAG_IS_DEFAULT(UseAddressNop)) {
1614 // Use it on all Intel cpus starting from PentiumPro
1615 UseAddressNop = true;
1616 }
1617 }
1618 if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
1619 UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
1620 }
1621 if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
1622 if (supports_sse3()) {
1623 UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
1624 } else {
1625 UseXmmRegToRegMoveAll = false;
1626 }
1627 }
1628 if (is_intel_server_family() && supports_sse3()) { // New Intel cpus
1629 #ifdef COMPILER2
1630 if (FLAG_IS_DEFAULT(MaxLoopPad)) {
1631 // For new Intel cpus do the next optimization:
1632 // don't align the beginning of a loop if there are enough instructions
1633 // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
1634 // in current fetch line (OptoLoopAlignment) or the padding
1635 // is big (> MaxLoopPad).
1636 // Set MaxLoopPad to 11 for new Intel cpus to reduce number of
1637 // generated NOP instructions. 11 is the largest size of one
1638 // address NOP instruction '0F 1F' (see Assembler::nop(i)).
1639 MaxLoopPad = 11;
1640 }
1641 #endif // COMPILER2
1642
1643 if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1644 UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
1645 }
1646 if ((supports_sse4_2() && supports_ht()) || supports_avx()) { // Newest Intel cpus
1647 if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1648 UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
1649 }
1650 }
1651 }
1652 if (is_atom_family() || is_knights_family()) {
1653 #ifdef COMPILER2
1654 if (FLAG_IS_DEFAULT(OptoScheduling)) {
1655 OptoScheduling = true;
1656 }
1657 #endif
1658 if (supports_sse4_2()) { // Silvermont
1659 if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1660 UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
1661 }
1662 }
1663 if (FLAG_IS_DEFAULT(UseIncDec)) {
1664 FLAG_SET_DEFAULT(UseIncDec, false);
1665 }
1666 }
1667 if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
1668 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1669 }
1670 }
1671
1672 #ifdef COMPILER2
1673 if (UseAVX > 2) {
1674 if (FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) ||
1675 (!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) &&
1676 ArrayOperationPartialInlineSize != 0 &&
1677 ArrayOperationPartialInlineSize != 16 &&
1678 ArrayOperationPartialInlineSize != 32 &&
1679 ArrayOperationPartialInlineSize != 64)) {
1680 int inline_size = 0;
1681 if (MaxVectorSize >= 64 && AVX3Threshold == 0) {
1682 inline_size = 64;
1683 } else if (MaxVectorSize >= 32) {
1684 inline_size = 32;
1685 } else if (MaxVectorSize >= 16) {
1686 inline_size = 16;
1687 }
1688 if(!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize)) {
1689 warning("Setting ArrayOperationPartialInlineSize as %d", inline_size);
1690 }
1691 ArrayOperationPartialInlineSize = inline_size;
1692 }
1693
1694 if (ArrayOperationPartialInlineSize > MaxVectorSize) {
1695 ArrayOperationPartialInlineSize = MaxVectorSize >= 16 ? MaxVectorSize : 0;
1696 if (ArrayOperationPartialInlineSize) {
1697 warning("Setting ArrayOperationPartialInlineSize as MaxVectorSize=%zd", MaxVectorSize);
1698 } else {
1699 warning("Setting ArrayOperationPartialInlineSize as %zd", ArrayOperationPartialInlineSize);
1700 }
1701 }
1702 }
1703
1704 if (FLAG_IS_DEFAULT(OptimizeFill)) {
1705 if (MaxVectorSize < 32 || (!EnableX86ECoreOpts && !VM_Version::supports_avx512vlbw())) {
1706 OptimizeFill = false;
1707 }
1708 }
1709 #endif
1710 if (supports_sse4_2()) {
1711 if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
1712 FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
1713 }
1714 } else {
1715 if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
1716 warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
1717 }
1718 FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
1719 }
1720 if (UseSSE42Intrinsics) {
1721 if (FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
1722 UseVectorizedMismatchIntrinsic = true;
1723 }
1724 } else if (UseVectorizedMismatchIntrinsic) {
1725 if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic))
1726 warning("vectorizedMismatch intrinsics are not available on this CPU");
1727 FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
1728 }
1729 if (UseAVX >= 2) {
1730 FLAG_SET_DEFAULT(UseVectorizedHashCodeIntrinsic, true);
1731 } else if (UseVectorizedHashCodeIntrinsic) {
1732 if (!FLAG_IS_DEFAULT(UseVectorizedHashCodeIntrinsic))
1733 warning("vectorizedHashCode intrinsics are not available on this CPU");
1734 FLAG_SET_DEFAULT(UseVectorizedHashCodeIntrinsic, false);
1735 }
1736
1737 // Use count leading zeros count instruction if available.
1738 if (supports_lzcnt()) {
1739 if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
1740 UseCountLeadingZerosInstruction = true;
1741 }
1742 } else if (UseCountLeadingZerosInstruction) {
1743 warning("lzcnt instruction is not available on this CPU");
1744 FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
1745 }
1746
1747 // Use count trailing zeros instruction if available
1748 if (supports_bmi1()) {
1749 // tzcnt does not require VEX prefix
1750 if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
1751 if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
1752 // Don't use tzcnt if BMI1 is switched off on command line.
1753 UseCountTrailingZerosInstruction = false;
1754 } else {
1755 UseCountTrailingZerosInstruction = true;
1756 }
1757 }
1758 } else if (UseCountTrailingZerosInstruction) {
1759 warning("tzcnt instruction is not available on this CPU");
1760 FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
1761 }
1762
1763 // BMI instructions (except tzcnt) use an encoding with VEX prefix.
1764 // VEX prefix is generated only when AVX > 0.
1765 if (supports_bmi1() && supports_avx()) {
1766 if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
1767 UseBMI1Instructions = true;
1768 }
1769 } else if (UseBMI1Instructions) {
1770 warning("BMI1 instructions are not available on this CPU (AVX is also required)");
1771 FLAG_SET_DEFAULT(UseBMI1Instructions, false);
1772 }
1773
1774 if (supports_bmi2() && supports_avx()) {
1775 if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
1776 UseBMI2Instructions = true;
1777 }
1778 } else if (UseBMI2Instructions) {
1779 warning("BMI2 instructions are not available on this CPU (AVX is also required)");
1780 FLAG_SET_DEFAULT(UseBMI2Instructions, false);
1781 }
1782
1783 // Use population count instruction if available.
1784 if (supports_popcnt()) {
1785 if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
1786 UsePopCountInstruction = true;
1787 }
1788 } else if (UsePopCountInstruction) {
1789 warning("POPCNT instruction is not available on this CPU");
1790 FLAG_SET_DEFAULT(UsePopCountInstruction, false);
1791 }
1792
1793 // Use fast-string operations if available.
1794 if (supports_erms()) {
1795 if (FLAG_IS_DEFAULT(UseFastStosb)) {
1796 UseFastStosb = true;
1797 }
1798 } else if (UseFastStosb) {
1799 warning("fast-string operations are not available on this CPU");
1800 FLAG_SET_DEFAULT(UseFastStosb, false);
1801 }
1802
1803 // For AMD Processors use XMM/YMM MOVDQU instructions
1804 // for Object Initialization as default
1805 if (is_amd() && cpu_family() >= 0x19) {
1806 if (FLAG_IS_DEFAULT(UseFastStosb)) {
1807 UseFastStosb = false;
1808 }
1809 }
1810
1811 #ifdef COMPILER2
1812 if (is_intel() && MaxVectorSize > 16) {
1813 if (FLAG_IS_DEFAULT(UseFastStosb)) {
1814 UseFastStosb = false;
1815 }
1816 }
1817 #endif
1818
1819 // Use XMM/YMM MOVDQU instruction for Object Initialization
1820 if (!UseFastStosb && UseUnalignedLoadStores) {
1821 if (FLAG_IS_DEFAULT(UseXMMForObjInit)) {
1822 UseXMMForObjInit = true;
1823 }
1824 } else if (UseXMMForObjInit) {
1825 warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off.");
1826 FLAG_SET_DEFAULT(UseXMMForObjInit, false);
1827 }
1828
1829 #ifdef COMPILER2
1830 if (FLAG_IS_DEFAULT(AlignVector)) {
1831 // Modern processors allow misaligned memory operations for vectors.
1832 AlignVector = !UseUnalignedLoadStores;
1833 }
1834 #endif // COMPILER2
1835
1836 if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
1837 if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) {
1838 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
1839 } else if (!supports_sse() && supports_3dnow_prefetch()) {
1840 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1841 }
1842 }
1843
1844 // Allocation prefetch settings
1845 int cache_line_size = checked_cast<int>(prefetch_data_size());
1846 if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize) &&
1847 (cache_line_size > AllocatePrefetchStepSize)) {
1848 FLAG_SET_DEFAULT(AllocatePrefetchStepSize, cache_line_size);
1849 }
1850
1851 if ((AllocatePrefetchDistance == 0) && (AllocatePrefetchStyle != 0)) {
1852 assert(!FLAG_IS_DEFAULT(AllocatePrefetchDistance), "default value should not be 0");
1853 if (!FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
1854 warning("AllocatePrefetchDistance is set to 0 which disable prefetching. Ignoring AllocatePrefetchStyle flag.");
1855 }
1856 FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
1857 }
1858
1859 if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
1860 bool use_watermark_prefetch = (AllocatePrefetchStyle == 2);
1861 FLAG_SET_DEFAULT(AllocatePrefetchDistance, allocate_prefetch_distance(use_watermark_prefetch));
1862 }
1863
1864 if (is_intel() && is_intel_server_family() && supports_sse3()) {
1865 if (FLAG_IS_DEFAULT(AllocatePrefetchLines) &&
1866 supports_sse4_2() && supports_ht()) { // Nehalem based cpus
1867 FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
1868 }
1869 #ifdef COMPILER2
1870 if (FLAG_IS_DEFAULT(UseFPUForSpilling) && supports_sse4_2()) {
1871 FLAG_SET_DEFAULT(UseFPUForSpilling, true);
1872 }
1873 #endif
1874 }
1875
1876 if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse4_2()) {
1877 #ifdef COMPILER2
1878 if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
1879 FLAG_SET_DEFAULT(UseFPUForSpilling, true);
1880 }
1881 #endif
1882 }
1883
1884 // Prefetch settings
1885
1886 // Prefetch interval for gc copy/scan == 9 dcache lines. Derived from
1887 // 50-warehouse specjbb runs on a 2-way 1.8ghz opteron using a 4gb heap.
1888 // Tested intervals from 128 to 2048 in increments of 64 == one cache line.
1889 // 256 bytes (4 dcache lines) was the nearest runner-up to 576.
1890
1891 // gc copy/scan is disabled if prefetchw isn't supported, because
1892 // Prefetch::write emits an inlined prefetchw on Linux.
1893 // Do not use the 3dnow prefetchw instruction. It isn't supported on em64t.
1894 // The used prefetcht0 instruction works for both amd64 and em64t.
1895
1896 if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes)) {
1897 FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 576);
1898 }
1899 if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes)) {
1900 FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 576);
1901 }
1902
1903 if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
1904 (cache_line_size > ContendedPaddingWidth))
1905 ContendedPaddingWidth = cache_line_size;
1906
1907 // This machine allows unaligned memory accesses
1908 if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
1909 FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
1910 }
1911
1912 #ifndef PRODUCT
1913 if (log_is_enabled(Info, os, cpu)) {
1914 LogStream ls(Log(os, cpu)::info());
1915 outputStream* log = &ls;
1916 log->print_cr("Logical CPUs per core: %u",
1917 logical_processors_per_package());
1918 log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
1919 log->print("UseSSE=%d", UseSSE);
1920 if (UseAVX > 0) {
1921 log->print(" UseAVX=%d", UseAVX);
1922 }
1923 if (UseAES) {
1924 log->print(" UseAES=1");
1925 }
1926 #ifdef COMPILER2
1927 if (MaxVectorSize > 0) {
1928 log->print(" MaxVectorSize=%d", (int) MaxVectorSize);
1929 }
1930 #endif
1931 log->cr();
1932 log->print("Allocation");
1933 if (AllocatePrefetchStyle <= 0) {
1934 log->print_cr(": no prefetching");
1935 } else {
1936 log->print(" prefetching: ");
1937 if (AllocatePrefetchInstr == 0) {
1938 log->print("PREFETCHNTA");
1939 } else if (AllocatePrefetchInstr == 1) {
1940 log->print("PREFETCHT0");
1941 } else if (AllocatePrefetchInstr == 2) {
1942 log->print("PREFETCHT2");
1943 } else if (AllocatePrefetchInstr == 3) {
1944 log->print("PREFETCHW");
1945 }
1946 if (AllocatePrefetchLines > 1) {
1947 log->print_cr(" at distance %d, %d lines of %d bytes", AllocatePrefetchDistance, AllocatePrefetchLines, AllocatePrefetchStepSize);
1948 } else {
1949 log->print_cr(" at distance %d, one line of %d bytes", AllocatePrefetchDistance, AllocatePrefetchStepSize);
1950 }
1951 }
1952
1953 if (PrefetchCopyIntervalInBytes > 0) {
1954 log->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
1955 }
1956 if (PrefetchScanIntervalInBytes > 0) {
1957 log->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
1958 }
1959 if (ContendedPaddingWidth > 0) {
1960 log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
1961 }
1962 }
1963 #endif // !PRODUCT
1964 if (FLAG_IS_DEFAULT(UseSignumIntrinsic)) {
1965 FLAG_SET_DEFAULT(UseSignumIntrinsic, true);
1966 }
1967 if (FLAG_IS_DEFAULT(UseCopySignIntrinsic)) {
1968 FLAG_SET_DEFAULT(UseCopySignIntrinsic, true);
1969 }
1970 }
1971
1972 void VM_Version::print_platform_virtualization_info(outputStream* st) {
1973 VirtualizationType vrt = VM_Version::get_detected_virtualization();
1974 if (vrt == XenHVM) {
1975 st->print_cr("Xen hardware-assisted virtualization detected");
1976 } else if (vrt == KVM) {
1977 st->print_cr("KVM virtualization detected");
1978 } else if (vrt == VMWare) {
1979 st->print_cr("VMWare virtualization detected");
1980 VirtualizationSupport::print_virtualization_info(st);
1981 } else if (vrt == HyperV) {
1982 st->print_cr("Hyper-V virtualization detected");
1983 } else if (vrt == HyperVRole) {
1984 st->print_cr("Hyper-V role detected");
1985 }
1986 }
1987
1988 bool VM_Version::compute_has_intel_jcc_erratum() {
1989 if (!is_intel_family_core()) {
1990 // Only Intel CPUs are affected.
1991 return false;
1992 }
1993 // The following table of affected CPUs is based on the following document released by Intel:
1994 // https://www.intel.com/content/dam/support/us/en/documents/processors/mitigations-jump-conditional-code-erratum.pdf
1995 switch (_model) {
1996 case 0x8E:
1997 // 06_8EH | 9 | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Amber Lake Y
1998 // 06_8EH | 9 | 7th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake U
1999 // 06_8EH | 9 | 7th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake U 23e
2000 // 06_8EH | 9 | 7th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake Y
2001 // 06_8EH | A | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake U43e
2002 // 06_8EH | B | 8th Generation Intel(R) Core(TM) Processors based on microarchitecture code name Whiskey Lake U
2003 // 06_8EH | C | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Amber Lake Y
2004 // 06_8EH | C | 10th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Comet Lake U42
2005 // 06_8EH | C | 8th Generation Intel(R) Core(TM) Processors based on microarchitecture code name Whiskey Lake U
2006 return _stepping == 0x9 || _stepping == 0xA || _stepping == 0xB || _stepping == 0xC;
2007 case 0x4E:
2008 // 06_4E | 3 | 6th Generation Intel(R) Core(TM) Processors based on microarchitecture code name Skylake U
2009 // 06_4E | 3 | 6th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Skylake U23e
2010 // 06_4E | 3 | 6th Generation Intel(R) Core(TM) Processors based on microarchitecture code name Skylake Y
2011 return _stepping == 0x3;
2012 case 0x55:
2013 // 06_55H | 4 | Intel(R) Xeon(R) Processor D Family based on microarchitecture code name Skylake D, Bakerville
2014 // 06_55H | 4 | Intel(R) Xeon(R) Scalable Processors based on microarchitecture code name Skylake Server
2015 // 06_55H | 4 | Intel(R) Xeon(R) Processor W Family based on microarchitecture code name Skylake W
2016 // 06_55H | 4 | Intel(R) Core(TM) X-series Processors based on microarchitecture code name Skylake X
2017 // 06_55H | 4 | Intel(R) Xeon(R) Processor E3 v5 Family based on microarchitecture code name Skylake Xeon E3
2018 // 06_55 | 7 | 2nd Generation Intel(R) Xeon(R) Scalable Processors based on microarchitecture code name Cascade Lake (server)
2019 return _stepping == 0x4 || _stepping == 0x7;
2020 case 0x5E:
2021 // 06_5E | 3 | 6th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Skylake H
2022 // 06_5E | 3 | 6th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Skylake S
2023 return _stepping == 0x3;
2024 case 0x9E:
2025 // 06_9EH | 9 | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake G
2026 // 06_9EH | 9 | 7th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake H
2027 // 06_9EH | 9 | 7th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake S
2028 // 06_9EH | 9 | Intel(R) Core(TM) X-series Processors based on microarchitecture code name Kaby Lake X
2029 // 06_9EH | 9 | Intel(R) Xeon(R) Processor E3 v6 Family Kaby Lake Xeon E3
2030 // 06_9EH | A | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake H
2031 // 06_9EH | A | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake S
2032 // 06_9EH | A | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake S (6+2) x/KBP
2033 // 06_9EH | A | Intel(R) Xeon(R) Processor E Family based on microarchitecture code name Coffee Lake S (6+2)
2034 // 06_9EH | A | Intel(R) Xeon(R) Processor E Family based on microarchitecture code name Coffee Lake S (4+2)
2035 // 06_9EH | B | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake S (4+2)
2036 // 06_9EH | B | Intel(R) Celeron(R) Processor G Series based on microarchitecture code name Coffee Lake S (4+2)
2037 // 06_9EH | D | 9th Generation Intel(R) Core(TM) Processor Family based on microarchitecturecode name Coffee Lake H (8+2)
2038 // 06_9EH | D | 9th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake S (8+2)
2039 return _stepping == 0x9 || _stepping == 0xA || _stepping == 0xB || _stepping == 0xD;
2040 case 0xA5:
2041 // Not in Intel documentation.
2042 // 06_A5H | | 10th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Comet Lake S/H
2043 return true;
2044 case 0xA6:
2045 // 06_A6H | 0 | 10th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Comet Lake U62
2046 return _stepping == 0x0;
2047 case 0xAE:
2048 // 06_AEH | A | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake Refresh U (4+2)
2049 return _stepping == 0xA;
2050 default:
2051 // If we are running on another intel machine not recognized in the table, we are okay.
2052 return false;
2053 }
2054 }
2055
2056 // On Xen, the cpuid instruction returns
2057 // eax / registers[0]: Version of Xen
2058 // ebx / registers[1]: chars 'XenV'
2059 // ecx / registers[2]: chars 'MMXe'
2060 // edx / registers[3]: chars 'nVMM'
2061 //
2062 // On KVM / VMWare / MS Hyper-V, the cpuid instruction returns
2063 // ebx / registers[1]: chars 'KVMK' / 'VMwa' / 'Micr'
2064 // ecx / registers[2]: chars 'VMKV' / 'reVM' / 'osof'
2065 // edx / registers[3]: chars 'M' / 'ware' / 't Hv'
2066 //
2067 // more information :
2068 // https://kb.vmware.com/s/article/1009458
2069 //
2070 void VM_Version::check_virtualizations() {
2071 uint32_t registers[4] = {0};
2072 char signature[13] = {0};
2073
2074 // Xen cpuid leaves can be found 0x100 aligned boundary starting
2075 // from 0x40000000 until 0x40010000.
2076 // https://lists.linuxfoundation.org/pipermail/virtualization/2012-May/019974.html
2077 for (int leaf = 0x40000000; leaf < 0x40010000; leaf += 0x100) {
2078 detect_virt_stub(leaf, registers);
2079 memcpy(signature, ®isters[1], 12);
2080
2081 if (strncmp("VMwareVMware", signature, 12) == 0) {
2082 Abstract_VM_Version::_detected_virtualization = VMWare;
2083 // check for extended metrics from guestlib
2084 VirtualizationSupport::initialize();
2085 } else if (strncmp("Microsoft Hv", signature, 12) == 0) {
2086 Abstract_VM_Version::_detected_virtualization = HyperV;
2087 #ifdef _WINDOWS
2088 // CPUID leaf 0x40000007 is available to the root partition only.
2089 // See Hypervisor Top Level Functional Specification section 2.4.8 for more details.
2090 // https://github.com/MicrosoftDocs/Virtualization-Documentation/raw/master/tlfs/Hypervisor%20Top%20Level%20Functional%20Specification%20v6.0b.pdf
2091 detect_virt_stub(0x40000007, registers);
2092 if ((registers[0] != 0x0) ||
2093 (registers[1] != 0x0) ||
2094 (registers[2] != 0x0) ||
2095 (registers[3] != 0x0)) {
2096 Abstract_VM_Version::_detected_virtualization = HyperVRole;
2097 }
2098 #endif
2099 } else if (strncmp("KVMKVMKVM", signature, 9) == 0) {
2100 Abstract_VM_Version::_detected_virtualization = KVM;
2101 } else if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
2102 Abstract_VM_Version::_detected_virtualization = XenHVM;
2103 }
2104 }
2105 }
2106
2107 #ifdef COMPILER2
2108 // Determine if it's running on Cascade Lake using default options.
2109 bool VM_Version::is_default_intel_cascade_lake() {
2110 return FLAG_IS_DEFAULT(UseAVX) &&
2111 FLAG_IS_DEFAULT(MaxVectorSize) &&
2112 UseAVX > 2 &&
2113 is_intel_cascade_lake();
2114 }
2115 #endif
2116
2117 bool VM_Version::is_intel_cascade_lake() {
2118 return is_intel_skylake() && _stepping >= 5;
2119 }
2120
2121 bool VM_Version::is_intel_darkmont() {
2122 return is_intel() && is_intel_server_family() && (_model == 0xCC || _model == 0xDD);
2123 }
2124
2125 // avx3_threshold() sets the threshold at which 64-byte instructions are used
2126 // for implementing the array copy and clear operations.
2127 // The Intel platforms that supports the serialize instruction
2128 // has improved implementation of 64-byte load/stores and so the default
2129 // threshold is set to 0 for these platforms.
2130 int VM_Version::avx3_threshold() {
2131 return (is_intel_server_family() &&
2132 supports_serialize() &&
2133 FLAG_IS_DEFAULT(AVX3Threshold)) ? 0 : AVX3Threshold;
2134 }
2135
2136 void VM_Version::clear_apx_test_state() {
2137 clear_apx_test_state_stub();
2138 }
2139
2140 static bool _vm_version_initialized = false;
2141
2142 void VM_Version::initialize() {
2143 ResourceMark rm;
2144
2145 // Making this stub must be FIRST use of assembler
2146 stub_blob = BufferBlob::create("VM_Version stub", stub_size);
2147 if (stub_blob == nullptr) {
2148 vm_exit_during_initialization("Unable to allocate stub for VM_Version");
2149 }
2150 CodeBuffer c(stub_blob);
2151 VM_Version_StubGenerator g(&c);
2152
2153 get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
2154 g.generate_get_cpu_info());
2155 detect_virt_stub = CAST_TO_FN_PTR(detect_virt_stub_t,
2156 g.generate_detect_virt());
2157 clear_apx_test_state_stub = CAST_TO_FN_PTR(clear_apx_test_state_t,
2158 g.clear_apx_test_state());
2159 getCPUIDBrandString_stub = CAST_TO_FN_PTR(getCPUIDBrandString_stub_t,
2160 g.generate_getCPUIDBrandString());
2161 get_processor_features();
2162
2163 Assembler::precompute_instructions();
2164
2165 if (VM_Version::supports_hv()) { // Supports hypervisor
2166 check_virtualizations();
2167 }
2168 _vm_version_initialized = true;
2169 }
2170
2171 typedef enum {
2172 CPU_FAMILY_8086_8088 = 0,
2173 CPU_FAMILY_INTEL_286 = 2,
2174 CPU_FAMILY_INTEL_386 = 3,
2175 CPU_FAMILY_INTEL_486 = 4,
2176 CPU_FAMILY_PENTIUM = 5,
2177 CPU_FAMILY_PENTIUMPRO = 6, // Same family several models
2178 CPU_FAMILY_PENTIUM_4 = 0xF
2179 } FamilyFlag;
2180
2181 typedef enum {
2182 RDTSCP_FLAG = 0x08000000, // bit 27
2183 INTEL64_FLAG = 0x20000000 // bit 29
2184 } _featureExtendedEdxFlag;
2185
2186 typedef enum {
2187 FPU_FLAG = 0x00000001,
2188 VME_FLAG = 0x00000002,
2189 DE_FLAG = 0x00000004,
2190 PSE_FLAG = 0x00000008,
2191 TSC_FLAG = 0x00000010,
2192 MSR_FLAG = 0x00000020,
2193 PAE_FLAG = 0x00000040,
2194 MCE_FLAG = 0x00000080,
2195 CX8_FLAG = 0x00000100,
2196 APIC_FLAG = 0x00000200,
2197 SEP_FLAG = 0x00000800,
2198 MTRR_FLAG = 0x00001000,
2199 PGE_FLAG = 0x00002000,
2200 MCA_FLAG = 0x00004000,
2201 CMOV_FLAG = 0x00008000,
2202 PAT_FLAG = 0x00010000,
2203 PSE36_FLAG = 0x00020000,
2204 PSNUM_FLAG = 0x00040000,
2205 CLFLUSH_FLAG = 0x00080000,
2206 DTS_FLAG = 0x00200000,
2207 ACPI_FLAG = 0x00400000,
2208 MMX_FLAG = 0x00800000,
2209 FXSR_FLAG = 0x01000000,
2210 SSE_FLAG = 0x02000000,
2211 SSE2_FLAG = 0x04000000,
2212 SS_FLAG = 0x08000000,
2213 HTT_FLAG = 0x10000000,
2214 TM_FLAG = 0x20000000
2215 } FeatureEdxFlag;
2216
2217 // VM_Version statics
2218 enum {
2219 ExtendedFamilyIdLength_INTEL = 16,
2220 ExtendedFamilyIdLength_AMD = 24
2221 };
2222
2223 const size_t VENDOR_LENGTH = 13;
2224 const size_t CPU_EBS_MAX_LENGTH = (3 * 4 * 4 + 1);
2225 static char* _cpu_brand_string = nullptr;
2226 static int64_t _max_qualified_cpu_frequency = 0;
2227
2228 static int _no_of_threads = 0;
2229 static int _no_of_cores = 0;
2230
2231 const char* const _family_id_intel[ExtendedFamilyIdLength_INTEL] = {
2232 "8086/8088",
2233 "",
2234 "286",
2235 "386",
2236 "486",
2237 "Pentium",
2238 "Pentium Pro", //or Pentium-M/Woodcrest depending on model
2239 "",
2240 "",
2241 "",
2242 "",
2243 "",
2244 "",
2245 "",
2246 "",
2247 "Pentium 4"
2248 };
2249
2250 const char* const _family_id_amd[ExtendedFamilyIdLength_AMD] = {
2251 "",
2252 "",
2253 "",
2254 "",
2255 "5x86",
2256 "K5/K6",
2257 "Athlon/AthlonXP",
2258 "",
2259 "",
2260 "",
2261 "",
2262 "",
2263 "",
2264 "",
2265 "",
2266 "Opteron/Athlon64",
2267 "Opteron QC/Phenom", // Barcelona et.al.
2268 "",
2269 "",
2270 "",
2271 "",
2272 "",
2273 "",
2274 "Zen"
2275 };
2276 // Partially from Intel 64 and IA-32 Architecture Software Developer's Manual,
2277 // September 2013, Vol 3C Table 35-1
2278 const char* const _model_id_pentium_pro[] = {
2279 "",
2280 "Pentium Pro",
2281 "",
2282 "Pentium II model 3",
2283 "",
2284 "Pentium II model 5/Xeon/Celeron",
2285 "Celeron",
2286 "Pentium III/Pentium III Xeon",
2287 "Pentium III/Pentium III Xeon",
2288 "Pentium M model 9", // Yonah
2289 "Pentium III, model A",
2290 "Pentium III, model B",
2291 "",
2292 "Pentium M model D", // Dothan
2293 "",
2294 "Core 2", // 0xf Woodcrest/Conroe/Merom/Kentsfield/Clovertown
2295 "",
2296 "",
2297 "",
2298 "",
2299 "",
2300 "",
2301 "Celeron", // 0x16 Celeron 65nm
2302 "Core 2", // 0x17 Penryn / Harpertown
2303 "",
2304 "",
2305 "Core i7", // 0x1A CPU_MODEL_NEHALEM_EP
2306 "Atom", // 0x1B Z5xx series Silverthorn
2307 "",
2308 "Core 2", // 0x1D Dunnington (6-core)
2309 "Nehalem", // 0x1E CPU_MODEL_NEHALEM
2310 "",
2311 "",
2312 "",
2313 "",
2314 "",
2315 "",
2316 "Westmere", // 0x25 CPU_MODEL_WESTMERE
2317 "",
2318 "",
2319 "", // 0x28
2320 "",
2321 "Sandy Bridge", // 0x2a "2nd Generation Intel Core i7, i5, i3"
2322 "",
2323 "Westmere-EP", // 0x2c CPU_MODEL_WESTMERE_EP
2324 "Sandy Bridge-EP", // 0x2d CPU_MODEL_SANDYBRIDGE_EP
2325 "Nehalem-EX", // 0x2e CPU_MODEL_NEHALEM_EX
2326 "Westmere-EX", // 0x2f CPU_MODEL_WESTMERE_EX
2327 "",
2328 "",
2329 "",
2330 "",
2331 "",
2332 "",
2333 "",
2334 "",
2335 "",
2336 "",
2337 "Ivy Bridge", // 0x3a
2338 "",
2339 "Haswell", // 0x3c "4th Generation Intel Core Processor"
2340 "", // 0x3d "Next Generation Intel Core Processor"
2341 "Ivy Bridge-EP", // 0x3e "Next Generation Intel Xeon Processor E7 Family"
2342 "", // 0x3f "Future Generation Intel Xeon Processor"
2343 "",
2344 "",
2345 "",
2346 "",
2347 "",
2348 "Haswell", // 0x45 "4th Generation Intel Core Processor"
2349 "Haswell", // 0x46 "4th Generation Intel Core Processor"
2350 nullptr
2351 };
2352
2353 /* Brand ID is for back compatibility
2354 * Newer CPUs uses the extended brand string */
2355 const char* const _brand_id[] = {
2356 "",
2357 "Celeron processor",
2358 "Pentium III processor",
2359 "Intel Pentium III Xeon processor",
2360 "",
2361 "",
2362 "",
2363 "",
2364 "Intel Pentium 4 processor",
2365 nullptr
2366 };
2367
2368
2369 const char* const _feature_edx_id[] = {
2370 "On-Chip FPU",
2371 "Virtual Mode Extensions",
2372 "Debugging Extensions",
2373 "Page Size Extensions",
2374 "Time Stamp Counter",
2375 "Model Specific Registers",
2376 "Physical Address Extension",
2377 "Machine Check Exceptions",
2378 "CMPXCHG8B Instruction",
2379 "On-Chip APIC",
2380 "",
2381 "Fast System Call",
2382 "Memory Type Range Registers",
2383 "Page Global Enable",
2384 "Machine Check Architecture",
2385 "Conditional Mov Instruction",
2386 "Page Attribute Table",
2387 "36-bit Page Size Extension",
2388 "Processor Serial Number",
2389 "CLFLUSH Instruction",
2390 "",
2391 "Debug Trace Store feature",
2392 "ACPI registers in MSR space",
2393 "Intel Architecture MMX Technology",
2394 "Fast Float Point Save and Restore",
2395 "Streaming SIMD extensions",
2396 "Streaming SIMD extensions 2",
2397 "Self-Snoop",
2398 "Hyper Threading",
2399 "Thermal Monitor",
2400 "",
2401 "Pending Break Enable"
2402 };
2403
2404 const char* const _feature_extended_edx_id[] = {
2405 "",
2406 "",
2407 "",
2408 "",
2409 "",
2410 "",
2411 "",
2412 "",
2413 "",
2414 "",
2415 "",
2416 "SYSCALL/SYSRET",
2417 "",
2418 "",
2419 "",
2420 "",
2421 "",
2422 "",
2423 "",
2424 "",
2425 "Execute Disable Bit",
2426 "",
2427 "",
2428 "",
2429 "",
2430 "",
2431 "",
2432 "RDTSCP",
2433 "",
2434 "Intel 64 Architecture",
2435 "",
2436 ""
2437 };
2438
2439 const char* const _feature_ecx_id[] = {
2440 "Streaming SIMD Extensions 3",
2441 "PCLMULQDQ",
2442 "64-bit DS Area",
2443 "MONITOR/MWAIT instructions",
2444 "CPL Qualified Debug Store",
2445 "Virtual Machine Extensions",
2446 "Safer Mode Extensions",
2447 "Enhanced Intel SpeedStep technology",
2448 "Thermal Monitor 2",
2449 "Supplemental Streaming SIMD Extensions 3",
2450 "L1 Context ID",
2451 "",
2452 "Fused Multiply-Add",
2453 "CMPXCHG16B",
2454 "xTPR Update Control",
2455 "Perfmon and Debug Capability",
2456 "",
2457 "Process-context identifiers",
2458 "Direct Cache Access",
2459 "Streaming SIMD extensions 4.1",
2460 "Streaming SIMD extensions 4.2",
2461 "x2APIC",
2462 "MOVBE",
2463 "Popcount instruction",
2464 "TSC-Deadline",
2465 "AESNI",
2466 "XSAVE",
2467 "OSXSAVE",
2468 "AVX",
2469 "F16C",
2470 "RDRAND",
2471 ""
2472 };
2473
2474 const char* const _feature_extended_ecx_id[] = {
2475 "LAHF/SAHF instruction support",
2476 "Core multi-processor legacy mode",
2477 "",
2478 "",
2479 "",
2480 "Advanced Bit Manipulations: LZCNT",
2481 "SSE4A: MOVNTSS, MOVNTSD, EXTRQ, INSERTQ",
2482 "Misaligned SSE mode",
2483 "",
2484 "",
2485 "",
2486 "",
2487 "",
2488 "",
2489 "",
2490 "",
2491 "",
2492 "",
2493 "",
2494 "",
2495 "",
2496 "",
2497 "",
2498 "",
2499 "",
2500 "",
2501 "",
2502 "",
2503 "",
2504 "",
2505 "",
2506 ""
2507 };
2508
2509 const char* VM_Version::cpu_model_description(void) {
2510 uint32_t cpu_family = extended_cpu_family();
2511 uint32_t cpu_model = extended_cpu_model();
2512 const char* model = nullptr;
2513
2514 if (cpu_family == CPU_FAMILY_PENTIUMPRO) {
2515 for (uint32_t i = 0; i <= cpu_model; i++) {
2516 model = _model_id_pentium_pro[i];
2517 if (model == nullptr) {
2518 break;
2519 }
2520 }
2521 }
2522 return model;
2523 }
2524
2525 const char* VM_Version::cpu_brand_string(void) {
2526 if (_cpu_brand_string == nullptr) {
2527 _cpu_brand_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_EBS_MAX_LENGTH, mtInternal);
2528 if (nullptr == _cpu_brand_string) {
2529 return nullptr;
2530 }
2531 int ret_val = cpu_extended_brand_string(_cpu_brand_string, CPU_EBS_MAX_LENGTH);
2532 if (ret_val != OS_OK) {
2533 FREE_C_HEAP_ARRAY(char, _cpu_brand_string);
2534 _cpu_brand_string = nullptr;
2535 }
2536 }
2537 return _cpu_brand_string;
2538 }
2539
2540 const char* VM_Version::cpu_brand(void) {
2541 const char* brand = nullptr;
2542
2543 if ((_cpuid_info.std_cpuid1_ebx.value & 0xFF) > 0) {
2544 int brand_num = _cpuid_info.std_cpuid1_ebx.value & 0xFF;
2545 brand = _brand_id[0];
2546 for (int i = 0; brand != nullptr && i <= brand_num; i += 1) {
2547 brand = _brand_id[i];
2548 }
2549 }
2550 return brand;
2551 }
2552
2553 bool VM_Version::cpu_is_em64t(void) {
2554 return ((_cpuid_info.ext_cpuid1_edx.value & INTEL64_FLAG) == INTEL64_FLAG);
2555 }
2556
2557 bool VM_Version::is_netburst(void) {
2558 return (is_intel() && (extended_cpu_family() == CPU_FAMILY_PENTIUM_4));
2559 }
2560
2561 bool VM_Version::supports_tscinv_ext(void) {
2562 if (!supports_tscinv_bit()) {
2563 return false;
2564 }
2565
2566 if (is_intel()) {
2567 return true;
2568 }
2569
2570 if (is_amd()) {
2571 return !is_amd_Barcelona();
2572 }
2573
2574 if (is_hygon()) {
2575 return true;
2576 }
2577
2578 return false;
2579 }
2580
2581 void VM_Version::resolve_cpu_information_details(void) {
2582
2583 // in future we want to base this information on proper cpu
2584 // and cache topology enumeration such as:
2585 // Intel 64 Architecture Processor Topology Enumeration
2586 // which supports system cpu and cache topology enumeration
2587 // either using 2xAPICIDs or initial APICIDs
2588
2589 // currently only rough cpu information estimates
2590 // which will not necessarily reflect the exact configuration of the system
2591
2592 // this is the number of logical hardware threads
2593 // visible to the operating system
2594 _no_of_threads = os::processor_count();
2595
2596 // find out number of threads per cpu package
2597 int threads_per_package = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus;
2598 if (threads_per_package == 0) {
2599 // Fallback code to avoid div by zero in subsequent code.
2600 // CPUID 0Bh (ECX = 1) might return 0 on older AMD processor (EPYC 7763 at least)
2601 threads_per_package = threads_per_core() * cores_per_cpu();
2602 }
2603
2604 // use amount of threads visible to the process in order to guess number of sockets
2605 _no_of_sockets = _no_of_threads / threads_per_package;
2606
2607 // process might only see a subset of the total number of threads
2608 // from a single processor package. Virtualization/resource management for example.
2609 // If so then just write a hard 1 as num of pkgs.
2610 if (0 == _no_of_sockets) {
2611 _no_of_sockets = 1;
2612 }
2613
2614 // estimate the number of cores
2615 _no_of_cores = cores_per_cpu() * _no_of_sockets;
2616 }
2617
2618
2619 const char* VM_Version::cpu_family_description(void) {
2620 int cpu_family_id = extended_cpu_family();
2621 if (is_amd()) {
2622 if (cpu_family_id < ExtendedFamilyIdLength_AMD) {
2623 return _family_id_amd[cpu_family_id];
2624 }
2625 }
2626 if (is_intel()) {
2627 if (cpu_family_id == CPU_FAMILY_PENTIUMPRO) {
2628 return cpu_model_description();
2629 }
2630 if (cpu_family_id < ExtendedFamilyIdLength_INTEL) {
2631 return _family_id_intel[cpu_family_id];
2632 }
2633 }
2634 if (is_zx()) {
2635 int cpu_model_id = extended_cpu_model();
2636 if (cpu_family_id == 7) {
2637 switch (cpu_model_id) {
2638 case 0x1B:
2639 return "wudaokou";
2640 case 0x3B:
2641 return "lujiazui";
2642 case 0x5B:
2643 return "yongfeng";
2644 case 0x6B:
2645 return "shijidadao";
2646 }
2647 } else if (cpu_family_id == 6) {
2648 return "zhangjiang";
2649 }
2650 }
2651 if (is_hygon()) {
2652 return "Dhyana";
2653 }
2654 return "Unknown x86";
2655 }
2656
2657 int VM_Version::cpu_type_description(char* const buf, size_t buf_len) {
2658 assert(buf != nullptr, "buffer is null!");
2659 assert(buf_len >= CPU_TYPE_DESC_BUF_SIZE, "buffer len should at least be == CPU_TYPE_DESC_BUF_SIZE!");
2660
2661 const char* cpu_type = nullptr;
2662 const char* x64 = nullptr;
2663
2664 if (is_intel()) {
2665 cpu_type = "Intel";
2666 x64 = cpu_is_em64t() ? " Intel64" : "";
2667 } else if (is_amd()) {
2668 cpu_type = "AMD";
2669 x64 = cpu_is_em64t() ? " AMD64" : "";
2670 } else if (is_zx()) {
2671 cpu_type = "Zhaoxin";
2672 x64 = cpu_is_em64t() ? " x86_64" : "";
2673 } else if (is_hygon()) {
2674 cpu_type = "Hygon";
2675 x64 = cpu_is_em64t() ? " AMD64" : "";
2676 } else {
2677 cpu_type = "Unknown x86";
2678 x64 = cpu_is_em64t() ? " x86_64" : "";
2679 }
2680
2681 jio_snprintf(buf, buf_len, "%s %s%s SSE SSE2%s%s%s%s%s%s%s%s",
2682 cpu_type,
2683 cpu_family_description(),
2684 supports_ht() ? " (HT)" : "",
2685 supports_sse3() ? " SSE3" : "",
2686 supports_ssse3() ? " SSSE3" : "",
2687 supports_sse4_1() ? " SSE4.1" : "",
2688 supports_sse4_2() ? " SSE4.2" : "",
2689 supports_sse4a() ? " SSE4A" : "",
2690 is_netburst() ? " Netburst" : "",
2691 is_intel_family_core() ? " Core" : "",
2692 x64);
2693
2694 return OS_OK;
2695 }
2696
2697 int VM_Version::cpu_extended_brand_string(char* const buf, size_t buf_len) {
2698 assert(buf != nullptr, "buffer is null!");
2699 assert(buf_len >= CPU_EBS_MAX_LENGTH, "buffer len should at least be == CPU_EBS_MAX_LENGTH!");
2700 assert(getCPUIDBrandString_stub != nullptr, "not initialized");
2701
2702 // invoke newly generated asm code to fetch CPU Brand String
2703 getCPUIDBrandString_stub(&_cpuid_info);
2704
2705 // fetch results into buffer
2706 *((uint32_t*) &buf[0]) = _cpuid_info.proc_name_0;
2707 *((uint32_t*) &buf[4]) = _cpuid_info.proc_name_1;
2708 *((uint32_t*) &buf[8]) = _cpuid_info.proc_name_2;
2709 *((uint32_t*) &buf[12]) = _cpuid_info.proc_name_3;
2710 *((uint32_t*) &buf[16]) = _cpuid_info.proc_name_4;
2711 *((uint32_t*) &buf[20]) = _cpuid_info.proc_name_5;
2712 *((uint32_t*) &buf[24]) = _cpuid_info.proc_name_6;
2713 *((uint32_t*) &buf[28]) = _cpuid_info.proc_name_7;
2714 *((uint32_t*) &buf[32]) = _cpuid_info.proc_name_8;
2715 *((uint32_t*) &buf[36]) = _cpuid_info.proc_name_9;
2716 *((uint32_t*) &buf[40]) = _cpuid_info.proc_name_10;
2717 *((uint32_t*) &buf[44]) = _cpuid_info.proc_name_11;
2718
2719 return OS_OK;
2720 }
2721
2722 size_t VM_Version::cpu_write_support_string(char* const buf, size_t buf_len) {
2723 guarantee(buf != nullptr, "buffer is null!");
2724 guarantee(buf_len > 0, "buffer len not enough!");
2725
2726 unsigned int flag = 0;
2727 unsigned int fi = 0;
2728 size_t written = 0;
2729 const char* prefix = "";
2730
2731 #define WRITE_TO_BUF(string) \
2732 { \
2733 int res = jio_snprintf(&buf[written], buf_len - written, "%s%s", prefix, string); \
2734 if (res < 0) { \
2735 return buf_len - 1; \
2736 } \
2737 written += res; \
2738 if (prefix[0] == '\0') { \
2739 prefix = ", "; \
2740 } \
2741 }
2742
2743 for (flag = 1, fi = 0; flag <= 0x20000000 ; flag <<= 1, fi++) {
2744 if (flag == HTT_FLAG && (((_cpuid_info.std_cpuid1_ebx.value >> 16) & 0xff) <= 1)) {
2745 continue; /* no hyperthreading */
2746 } else if (flag == SEP_FLAG && (cpu_family() == CPU_FAMILY_PENTIUMPRO && ((_cpuid_info.std_cpuid1_eax.value & 0xff) < 0x33))) {
2747 continue; /* no fast system call */
2748 }
2749 if ((_cpuid_info.std_cpuid1_edx.value & flag) && strlen(_feature_edx_id[fi]) > 0) {
2750 WRITE_TO_BUF(_feature_edx_id[fi]);
2751 }
2752 }
2753
2754 for (flag = 1, fi = 0; flag <= 0x20000000; flag <<= 1, fi++) {
2755 if ((_cpuid_info.std_cpuid1_ecx.value & flag) && strlen(_feature_ecx_id[fi]) > 0) {
2756 WRITE_TO_BUF(_feature_ecx_id[fi]);
2757 }
2758 }
2759
2760 for (flag = 1, fi = 0; flag <= 0x20000000 ; flag <<= 1, fi++) {
2761 if ((_cpuid_info.ext_cpuid1_ecx.value & flag) && strlen(_feature_extended_ecx_id[fi]) > 0) {
2762 WRITE_TO_BUF(_feature_extended_ecx_id[fi]);
2763 }
2764 }
2765
2766 for (flag = 1, fi = 0; flag <= 0x20000000; flag <<= 1, fi++) {
2767 if ((_cpuid_info.ext_cpuid1_edx.value & flag) && strlen(_feature_extended_edx_id[fi]) > 0) {
2768 WRITE_TO_BUF(_feature_extended_edx_id[fi]);
2769 }
2770 }
2771
2772 if (supports_tscinv_bit()) {
2773 WRITE_TO_BUF("Invariant TSC");
2774 }
2775
2776 if (supports_hybrid()) {
2777 WRITE_TO_BUF("Hybrid Architecture");
2778 }
2779
2780 return written;
2781 }
2782
2783 /**
2784 * Write a detailed description of the cpu to a given buffer, including
2785 * feature set.
2786 */
2787 int VM_Version::cpu_detailed_description(char* const buf, size_t buf_len) {
2788 assert(buf != nullptr, "buffer is null!");
2789 assert(buf_len >= CPU_DETAILED_DESC_BUF_SIZE, "buffer len should at least be == CPU_DETAILED_DESC_BUF_SIZE!");
2790
2791 static const char* unknown = "<unknown>";
2792 char vendor_id[VENDOR_LENGTH];
2793 const char* family = nullptr;
2794 const char* model = nullptr;
2795 const char* brand = nullptr;
2796 int outputLen = 0;
2797
2798 family = cpu_family_description();
2799 if (family == nullptr) {
2800 family = unknown;
2801 }
2802
2803 model = cpu_model_description();
2804 if (model == nullptr) {
2805 model = unknown;
2806 }
2807
2808 brand = cpu_brand_string();
2809
2810 if (brand == nullptr) {
2811 brand = cpu_brand();
2812 if (brand == nullptr) {
2813 brand = unknown;
2814 }
2815 }
2816
2817 *((uint32_t*) &vendor_id[0]) = _cpuid_info.std_vendor_name_0;
2818 *((uint32_t*) &vendor_id[4]) = _cpuid_info.std_vendor_name_2;
2819 *((uint32_t*) &vendor_id[8]) = _cpuid_info.std_vendor_name_1;
2820 vendor_id[VENDOR_LENGTH-1] = '\0';
2821
2822 outputLen = jio_snprintf(buf, buf_len, "Brand: %s, Vendor: %s\n"
2823 "Family: %s (0x%x), Model: %s (0x%x), Stepping: 0x%x\n"
2824 "Ext. family: 0x%x, Ext. model: 0x%x, Type: 0x%x, Signature: 0x%8.8x\n"
2825 "Features: ebx: 0x%8.8x, ecx: 0x%8.8x, edx: 0x%8.8x\n"
2826 "Ext. features: eax: 0x%8.8x, ebx: 0x%8.8x, ecx: 0x%8.8x, edx: 0x%8.8x\n"
2827 "Supports: ",
2828 brand,
2829 vendor_id,
2830 family,
2831 extended_cpu_family(),
2832 model,
2833 extended_cpu_model(),
2834 cpu_stepping(),
2835 _cpuid_info.std_cpuid1_eax.bits.ext_family,
2836 _cpuid_info.std_cpuid1_eax.bits.ext_model,
2837 _cpuid_info.std_cpuid1_eax.bits.proc_type,
2838 _cpuid_info.std_cpuid1_eax.value,
2839 _cpuid_info.std_cpuid1_ebx.value,
2840 _cpuid_info.std_cpuid1_ecx.value,
2841 _cpuid_info.std_cpuid1_edx.value,
2842 _cpuid_info.ext_cpuid1_eax,
2843 _cpuid_info.ext_cpuid1_ebx,
2844 _cpuid_info.ext_cpuid1_ecx,
2845 _cpuid_info.ext_cpuid1_edx);
2846
2847 if (outputLen < 0 || (size_t) outputLen >= buf_len - 1) {
2848 if (buf_len > 0) { buf[buf_len-1] = '\0'; }
2849 return OS_ERR;
2850 }
2851
2852 cpu_write_support_string(&buf[outputLen], buf_len - outputLen);
2853
2854 return OS_OK;
2855 }
2856
2857
2858 // Fill in Abstract_VM_Version statics
2859 void VM_Version::initialize_cpu_information() {
2860 assert(_vm_version_initialized, "should have initialized VM_Version long ago");
2861 assert(!_initialized, "shouldn't be initialized yet");
2862 resolve_cpu_information_details();
2863
2864 // initialize cpu_name and cpu_desc
2865 cpu_type_description(_cpu_name, CPU_TYPE_DESC_BUF_SIZE);
2866 cpu_detailed_description(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE);
2867 _initialized = true;
2868 }
2869
2870 /**
2871 * For information about extracting the frequency from the cpu brand string, please see:
2872 *
2873 * Intel Processor Identification and the CPUID Instruction
2874 * Application Note 485
2875 * May 2012
2876 *
2877 * The return value is the frequency in Hz.
2878 */
2879 int64_t VM_Version::max_qualified_cpu_freq_from_brand_string(void) {
2880 const char* const brand_string = cpu_brand_string();
2881 if (brand_string == nullptr) {
2882 return 0;
2883 }
2884 const int64_t MEGA = 1000000;
2885 int64_t multiplier = 0;
2886 int64_t frequency = 0;
2887 uint8_t idx = 0;
2888 // The brand string buffer is at most 48 bytes.
2889 // -2 is to prevent buffer overrun when looking for y in yHz, as z is +2 from y.
2890 for (; idx < 48-2; ++idx) {
2891 // Format is either "x.xxyHz" or "xxxxyHz", where y=M, G, T and x are digits.
2892 // Search brand string for "yHz" where y is M, G, or T.
2893 if (brand_string[idx+1] == 'H' && brand_string[idx+2] == 'z') {
2894 if (brand_string[idx] == 'M') {
2895 multiplier = MEGA;
2896 } else if (brand_string[idx] == 'G') {
2897 multiplier = MEGA * 1000;
2898 } else if (brand_string[idx] == 'T') {
2899 multiplier = MEGA * MEGA;
2900 }
2901 break;
2902 }
2903 }
2904 if (multiplier > 0) {
2905 // Compute frequency (in Hz) from brand string.
2906 if (brand_string[idx-3] == '.') { // if format is "x.xx"
2907 frequency = (brand_string[idx-4] - '0') * multiplier;
2908 frequency += (brand_string[idx-2] - '0') * multiplier / 10;
2909 frequency += (brand_string[idx-1] - '0') * multiplier / 100;
2910 } else { // format is "xxxx"
2911 frequency = (brand_string[idx-4] - '0') * 1000;
2912 frequency += (brand_string[idx-3] - '0') * 100;
2913 frequency += (brand_string[idx-2] - '0') * 10;
2914 frequency += (brand_string[idx-1] - '0');
2915 frequency *= multiplier;
2916 }
2917 }
2918 return frequency;
2919 }
2920
2921
2922 int64_t VM_Version::maximum_qualified_cpu_frequency(void) {
2923 if (_max_qualified_cpu_frequency == 0) {
2924 _max_qualified_cpu_frequency = max_qualified_cpu_freq_from_brand_string();
2925 }
2926 return _max_qualified_cpu_frequency;
2927 }
2928
2929 VM_Version::VM_Features VM_Version::CpuidInfo::feature_flags() const {
2930 VM_Features vm_features;
2931 if (std_cpuid1_edx.bits.cmpxchg8 != 0)
2932 vm_features.set_feature(CPU_CX8);
2933 if (std_cpuid1_edx.bits.cmov != 0)
2934 vm_features.set_feature(CPU_CMOV);
2935 if (std_cpuid1_edx.bits.clflush != 0)
2936 vm_features.set_feature(CPU_FLUSH);
2937 // clflush should always be available on x86_64
2938 // if not we are in real trouble because we rely on it
2939 // to flush the code cache.
2940 assert (vm_features.supports_feature(CPU_FLUSH), "clflush should be available");
2941 if (std_cpuid1_edx.bits.fxsr != 0 || (is_amd_family() &&
2942 ext_cpuid1_edx.bits.fxsr != 0))
2943 vm_features.set_feature(CPU_FXSR);
2944 // HT flag is set for multi-core processors also.
2945 if (threads_per_core() > 1)
2946 vm_features.set_feature(CPU_HT);
2947 if (std_cpuid1_edx.bits.mmx != 0 || (is_amd_family() &&
2948 ext_cpuid1_edx.bits.mmx != 0))
2949 vm_features.set_feature(CPU_MMX);
2950 if (std_cpuid1_edx.bits.sse != 0)
2951 vm_features.set_feature(CPU_SSE);
2952 if (std_cpuid1_edx.bits.sse2 != 0)
2953 vm_features.set_feature(CPU_SSE2);
2954 if (std_cpuid1_ecx.bits.sse3 != 0)
2955 vm_features.set_feature(CPU_SSE3);
2956 if (std_cpuid1_ecx.bits.ssse3 != 0)
2957 vm_features.set_feature(CPU_SSSE3);
2958 if (std_cpuid1_ecx.bits.sse4_1 != 0)
2959 vm_features.set_feature(CPU_SSE4_1);
2960 if (std_cpuid1_ecx.bits.sse4_2 != 0)
2961 vm_features.set_feature(CPU_SSE4_2);
2962 if (std_cpuid1_ecx.bits.popcnt != 0)
2963 vm_features.set_feature(CPU_POPCNT);
2964 if (sefsl1_cpuid7_edx.bits.apx_f != 0 &&
2965 xem_xcr0_eax.bits.apx_f != 0 &&
2966 std_cpuid29_ebx.bits.apx_nci_ndd_nf != 0) {
2967 vm_features.set_feature(CPU_APX_F);
2968 }
2969 if (std_cpuid1_ecx.bits.avx != 0 &&
2970 std_cpuid1_ecx.bits.osxsave != 0 &&
2971 xem_xcr0_eax.bits.sse != 0 &&
2972 xem_xcr0_eax.bits.ymm != 0) {
2973 vm_features.set_feature(CPU_AVX);
2974 vm_features.set_feature(CPU_VZEROUPPER);
2975 if (sefsl1_cpuid7_eax.bits.sha512 != 0)
2976 vm_features.set_feature(CPU_SHA512);
2977 if (std_cpuid1_ecx.bits.f16c != 0)
2978 vm_features.set_feature(CPU_F16C);
2979 if (sef_cpuid7_ebx.bits.avx2 != 0) {
2980 vm_features.set_feature(CPU_AVX2);
2981 if (sefsl1_cpuid7_eax.bits.avx_ifma != 0)
2982 vm_features.set_feature(CPU_AVX_IFMA);
2983 }
2984 if (sef_cpuid7_ecx.bits.gfni != 0)
2985 vm_features.set_feature(CPU_GFNI);
2986 if (sef_cpuid7_ebx.bits.avx512f != 0 &&
2987 xem_xcr0_eax.bits.opmask != 0 &&
2988 xem_xcr0_eax.bits.zmm512 != 0 &&
2989 xem_xcr0_eax.bits.zmm32 != 0) {
2990 vm_features.set_feature(CPU_AVX512F);
2991 if (sef_cpuid7_ebx.bits.avx512cd != 0)
2992 vm_features.set_feature(CPU_AVX512CD);
2993 if (sef_cpuid7_ebx.bits.avx512dq != 0)
2994 vm_features.set_feature(CPU_AVX512DQ);
2995 if (sef_cpuid7_ebx.bits.avx512ifma != 0)
2996 vm_features.set_feature(CPU_AVX512_IFMA);
2997 if (sef_cpuid7_ebx.bits.avx512pf != 0)
2998 vm_features.set_feature(CPU_AVX512PF);
2999 if (sef_cpuid7_ebx.bits.avx512er != 0)
3000 vm_features.set_feature(CPU_AVX512ER);
3001 if (sef_cpuid7_ebx.bits.avx512bw != 0)
3002 vm_features.set_feature(CPU_AVX512BW);
3003 if (sef_cpuid7_ebx.bits.avx512vl != 0)
3004 vm_features.set_feature(CPU_AVX512VL);
3005 if (sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
3006 vm_features.set_feature(CPU_AVX512_VPOPCNTDQ);
3007 if (sef_cpuid7_ecx.bits.avx512_vpclmulqdq != 0)
3008 vm_features.set_feature(CPU_AVX512_VPCLMULQDQ);
3009 if (sef_cpuid7_ecx.bits.vaes != 0)
3010 vm_features.set_feature(CPU_AVX512_VAES);
3011 if (sef_cpuid7_ecx.bits.avx512_vnni != 0)
3012 vm_features.set_feature(CPU_AVX512_VNNI);
3013 if (sef_cpuid7_ecx.bits.avx512_bitalg != 0)
3014 vm_features.set_feature(CPU_AVX512_BITALG);
3015 if (sef_cpuid7_ecx.bits.avx512_vbmi != 0)
3016 vm_features.set_feature(CPU_AVX512_VBMI);
3017 if (sef_cpuid7_ecx.bits.avx512_vbmi2 != 0)
3018 vm_features.set_feature(CPU_AVX512_VBMI2);
3019 }
3020 if (is_intel()) {
3021 if (sefsl1_cpuid7_edx.bits.avx10 != 0 &&
3022 std_cpuid24_ebx.bits.avx10_vlen_512 !=0 &&
3023 std_cpuid24_ebx.bits.avx10_converged_isa_version >= 1 &&
3024 xem_xcr0_eax.bits.opmask != 0 &&
3025 xem_xcr0_eax.bits.zmm512 != 0 &&
3026 xem_xcr0_eax.bits.zmm32 != 0) {
3027 vm_features.set_feature(CPU_AVX10_1);
3028 vm_features.set_feature(CPU_AVX512F);
3029 vm_features.set_feature(CPU_AVX512CD);
3030 vm_features.set_feature(CPU_AVX512DQ);
3031 vm_features.set_feature(CPU_AVX512PF);
3032 vm_features.set_feature(CPU_AVX512ER);
3033 vm_features.set_feature(CPU_AVX512BW);
3034 vm_features.set_feature(CPU_AVX512VL);
3035 vm_features.set_feature(CPU_AVX512_VPOPCNTDQ);
3036 vm_features.set_feature(CPU_AVX512_VPCLMULQDQ);
3037 vm_features.set_feature(CPU_AVX512_VAES);
3038 vm_features.set_feature(CPU_AVX512_VNNI);
3039 vm_features.set_feature(CPU_AVX512_BITALG);
3040 vm_features.set_feature(CPU_AVX512_VBMI);
3041 vm_features.set_feature(CPU_AVX512_VBMI2);
3042 if (std_cpuid24_ebx.bits.avx10_converged_isa_version >= 2) {
3043 vm_features.set_feature(CPU_AVX10_2);
3044 }
3045 }
3046 }
3047 }
3048
3049 if (std_cpuid1_ecx.bits.hv != 0)
3050 vm_features.set_feature(CPU_HV);
3051 if (sef_cpuid7_ebx.bits.bmi1 != 0)
3052 vm_features.set_feature(CPU_BMI1);
3053 if (std_cpuid1_edx.bits.tsc != 0)
3054 vm_features.set_feature(CPU_TSC);
3055 if (ext_cpuid7_edx.bits.tsc_invariance != 0)
3056 vm_features.set_feature(CPU_TSCINV_BIT);
3057 if (std_cpuid1_ecx.bits.aes != 0)
3058 vm_features.set_feature(CPU_AES);
3059 if (ext_cpuid1_ecx.bits.lzcnt != 0)
3060 vm_features.set_feature(CPU_LZCNT);
3061 if (ext_cpuid1_ecx.bits.prefetchw != 0)
3062 vm_features.set_feature(CPU_3DNOW_PREFETCH);
3063 if (sef_cpuid7_ebx.bits.erms != 0)
3064 vm_features.set_feature(CPU_ERMS);
3065 if (sef_cpuid7_edx.bits.fast_short_rep_mov != 0)
3066 vm_features.set_feature(CPU_FSRM);
3067 if (std_cpuid1_ecx.bits.clmul != 0)
3068 vm_features.set_feature(CPU_CLMUL);
3069 if (sef_cpuid7_ebx.bits.rtm != 0)
3070 vm_features.set_feature(CPU_RTM);
3071 if (sef_cpuid7_ebx.bits.adx != 0)
3072 vm_features.set_feature(CPU_ADX);
3073 if (sef_cpuid7_ebx.bits.bmi2 != 0)
3074 vm_features.set_feature(CPU_BMI2);
3075 if (sef_cpuid7_ebx.bits.sha != 0)
3076 vm_features.set_feature(CPU_SHA);
3077 if (std_cpuid1_ecx.bits.fma != 0)
3078 vm_features.set_feature(CPU_FMA);
3079 if (sef_cpuid7_ebx.bits.clflushopt != 0)
3080 vm_features.set_feature(CPU_FLUSHOPT);
3081 if (sef_cpuid7_ebx.bits.clwb != 0)
3082 vm_features.set_feature(CPU_CLWB);
3083 if (ext_cpuid1_edx.bits.rdtscp != 0)
3084 vm_features.set_feature(CPU_RDTSCP);
3085 if (sef_cpuid7_ecx.bits.rdpid != 0)
3086 vm_features.set_feature(CPU_RDPID);
3087
3088 // AMD|Hygon additional features.
3089 if (is_amd_family()) {
3090 // PREFETCHW was checked above, check TDNOW here.
3091 if ((ext_cpuid1_edx.bits.tdnow != 0))
3092 vm_features.set_feature(CPU_3DNOW_PREFETCH);
3093 if (ext_cpuid1_ecx.bits.sse4a != 0)
3094 vm_features.set_feature(CPU_SSE4A);
3095 }
3096
3097 // Intel additional features.
3098 if (is_intel()) {
3099 if (sef_cpuid7_edx.bits.serialize != 0)
3100 vm_features.set_feature(CPU_SERIALIZE);
3101 if (sef_cpuid7_edx.bits.hybrid != 0)
3102 vm_features.set_feature(CPU_HYBRID);
3103 if (_cpuid_info.sef_cpuid7_edx.bits.avx512_fp16 != 0)
3104 vm_features.set_feature(CPU_AVX512_FP16);
3105 }
3106
3107 // ZX additional features.
3108 if (is_zx()) {
3109 // We do not know if these are supported by ZX, so we cannot trust
3110 // common CPUID bit for them.
3111 assert(vm_features.supports_feature(CPU_CLWB), "Check if it is supported?");
3112 vm_features.clear_feature(CPU_CLWB);
3113 }
3114
3115 // Protection key features.
3116 if (sef_cpuid7_ecx.bits.pku != 0) {
3117 vm_features.set_feature(CPU_PKU);
3118 }
3119 if (sef_cpuid7_ecx.bits.ospke != 0) {
3120 vm_features.set_feature(CPU_OSPKE);
3121 }
3122
3123 // Control flow enforcement (CET) features.
3124 if (sef_cpuid7_ecx.bits.cet_ss != 0) {
3125 vm_features.set_feature(CPU_CET_SS);
3126 }
3127 if (sef_cpuid7_edx.bits.cet_ibt != 0) {
3128 vm_features.set_feature(CPU_CET_IBT);
3129 }
3130
3131 // Composite features.
3132 if (supports_tscinv_bit() &&
3133 ((is_amd_family() && !is_amd_Barcelona()) ||
3134 is_intel_tsc_synched_at_init())) {
3135 vm_features.set_feature(CPU_TSCINV);
3136 }
3137 return vm_features;
3138 }
3139
3140 bool VM_Version::os_supports_avx_vectors() {
3141 bool retVal = false;
3142 int nreg = 4;
3143 if (supports_evex()) {
3144 // Verify that OS save/restore all bits of EVEX registers
3145 // during signal processing.
3146 retVal = true;
3147 for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
3148 if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
3149 retVal = false;
3150 break;
3151 }
3152 }
3153 } else if (supports_avx()) {
3154 // Verify that OS save/restore all bits of AVX registers
3155 // during signal processing.
3156 retVal = true;
3157 for (int i = 0; i < 8 * nreg; i++) { // 32 bytes per ymm register
3158 if (_cpuid_info.ymm_save[i] != ymm_test_value()) {
3159 retVal = false;
3160 break;
3161 }
3162 }
3163 // zmm_save will be set on a EVEX enabled machine even if we choose AVX code gen
3164 if (retVal == false) {
3165 // Verify that OS save/restore all bits of EVEX registers
3166 // during signal processing.
3167 retVal = true;
3168 for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
3169 if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
3170 retVal = false;
3171 break;
3172 }
3173 }
3174 }
3175 }
3176 return retVal;
3177 }
3178
3179 bool VM_Version::os_supports_apx_egprs() {
3180 if (!supports_apx_f()) {
3181 return false;
3182 }
3183 if (_cpuid_info.apx_save[0] != egpr_test_value() ||
3184 _cpuid_info.apx_save[1] != egpr_test_value()) {
3185 return false;
3186 }
3187 return true;
3188 }
3189
3190 uint VM_Version::cores_per_cpu() {
3191 uint result = 1;
3192 if (is_intel()) {
3193 bool supports_topology = supports_processor_topology();
3194 if (supports_topology) {
3195 result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
3196 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
3197 }
3198 if (!supports_topology || result == 0) {
3199 result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
3200 }
3201 } else if (is_amd_family()) {
3202 result = _cpuid_info.ext_cpuid8_ecx.bits.threads_per_cpu + 1;
3203 if (cpu_family() >= 0x17) { // Zen or later
3204 result /= _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
3205 }
3206 } else if (is_zx()) {
3207 bool supports_topology = supports_processor_topology();
3208 if (supports_topology) {
3209 result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
3210 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
3211 }
3212 if (!supports_topology || result == 0) {
3213 result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
3214 }
3215 }
3216 return result;
3217 }
3218
3219 uint VM_Version::threads_per_core() {
3220 uint result = 1;
3221 if (is_intel() && supports_processor_topology()) {
3222 result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
3223 } else if (is_zx() && supports_processor_topology()) {
3224 result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
3225 } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
3226 if (cpu_family() >= 0x17) {
3227 result = _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
3228 } else {
3229 result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
3230 cores_per_cpu();
3231 }
3232 }
3233 return (result == 0 ? 1 : result);
3234 }
3235
3236 uint VM_Version::L1_line_size() {
3237 uint result = 0;
3238 if (is_intel()) {
3239 result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
3240 } else if (is_amd_family()) {
3241 result = _cpuid_info.ext_cpuid5_ecx.bits.L1_line_size;
3242 } else if (is_zx()) {
3243 result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
3244 }
3245 if (result < 32) // not defined ?
3246 result = 32; // 32 bytes by default on x86 and other x64
3247 return result;
3248 }
3249
3250 bool VM_Version::is_intel_tsc_synched_at_init() {
3251 if (is_intel_family_core()) {
3252 uint32_t ext_model = extended_cpu_model();
3253 if (ext_model == CPU_MODEL_NEHALEM_EP ||
3254 ext_model == CPU_MODEL_WESTMERE_EP ||
3255 ext_model == CPU_MODEL_SANDYBRIDGE_EP ||
3256 ext_model == CPU_MODEL_IVYBRIDGE_EP) {
3257 // <= 2-socket invariant tsc support. EX versions are usually used
3258 // in > 2-socket systems and likely don't synchronize tscs at
3259 // initialization.
3260 // Code that uses tsc values must be prepared for them to arbitrarily
3261 // jump forward or backward.
3262 return true;
3263 }
3264 }
3265 return false;
3266 }
3267
3268 int VM_Version::allocate_prefetch_distance(bool use_watermark_prefetch) {
3269 // Hardware prefetching (distance/size in bytes):
3270 // Pentium 3 - 64 / 32
3271 // Pentium 4 - 256 / 128
3272 // Athlon - 64 / 32 ????
3273 // Opteron - 128 / 64 only when 2 sequential cache lines accessed
3274 // Core - 128 / 64
3275 //
3276 // Software prefetching (distance in bytes / instruction with best score):
3277 // Pentium 3 - 128 / prefetchnta
3278 // Pentium 4 - 512 / prefetchnta
3279 // Athlon - 128 / prefetchnta
3280 // Opteron - 256 / prefetchnta
3281 // Core - 256 / prefetchnta
3282 // It will be used only when AllocatePrefetchStyle > 0
3283
3284 if (is_amd_family()) { // AMD | Hygon
3285 if (supports_sse2()) {
3286 return 256; // Opteron
3287 } else {
3288 return 128; // Athlon
3289 }
3290 } else if (is_zx()) {
3291 if (supports_sse2()) {
3292 return 256;
3293 } else {
3294 return 128;
3295 }
3296 } else { // Intel
3297 if (supports_sse3() && is_intel_server_family()) {
3298 if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
3299 return 192;
3300 } else if (use_watermark_prefetch) { // watermark prefetching on Core
3301 return 384;
3302 }
3303 }
3304 if (supports_sse2()) {
3305 if (is_intel_server_family()) {
3306 return 256; // Pentium M, Core, Core2
3307 } else {
3308 return 512; // Pentium 4
3309 }
3310 } else {
3311 return 128; // Pentium 3 (and all other old CPUs)
3312 }
3313 }
3314 }
3315
3316 bool VM_Version::is_intrinsic_supported(vmIntrinsicID id) {
3317 assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
3318 switch (id) {
3319 case vmIntrinsics::_floatToFloat16:
3320 case vmIntrinsics::_float16ToFloat:
3321 if (!supports_float16()) {
3322 return false;
3323 }
3324 break;
3325 default:
3326 break;
3327 }
3328 return true;
3329 }
3330
3331 void VM_Version::insert_features_names(VM_Version::VM_Features features, stringStream& ss) {
3332 int i = 0;
3333 ss.join([&]() {
3334 const char* str = nullptr;
3335 while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
3336 if (features.supports_feature((VM_Version::Feature_Flag)i)) {
3337 str = _features_names[i];
3338 }
3339 i += 1;
3340 }
3341 return str;
3342 }, ", ");
3343 }
3344
3345 void VM_Version::get_cpu_features_name(void* features_buffer, stringStream& ss) {
3346 VM_Features* features = (VM_Features*)features_buffer;
3347 insert_features_names(*features, ss);
3348 }
3349
3350 void VM_Version::get_missing_features_name(void* features_set1, void* features_set2, stringStream& ss) {
3351 VM_Features* vm_features_set1 = (VM_Features*)features_set1;
3352 VM_Features* vm_features_set2 = (VM_Features*)features_set2;
3353 int i = 0;
3354 ss.join([&]() {
3355 const char* str = nullptr;
3356 while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
3357 Feature_Flag flag = (Feature_Flag)i;
3358 if (vm_features_set1->supports_feature(flag) && !vm_features_set2->supports_feature(flag)) {
3359 str = _features_names[i];
3360 }
3361 i += 1;
3362 }
3363 return str;
3364 }, ", ");
3365 }
3366
3367 int VM_Version::cpu_features_size() {
3368 return sizeof(VM_Features);
3369 }
3370
3371 void VM_Version::store_cpu_features(void* buf) {
3372 VM_Features copy = _features;
3373 copy.clear_feature(CPU_HT); // HT does not result in incompatibility of aot code cache
3374 memcpy(buf, ©, sizeof(VM_Features));
3375 }
3376
3377 bool VM_Version::supports_features(void* features_buffer) {
3378 VM_Features* features_to_test = (VM_Features*)features_buffer;
3379 return _features.supports_features(features_to_test);
3380 }