1 /*
2 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef CPU_X86_VM_VERSION_X86_HPP
26 #define CPU_X86_VM_VERSION_X86_HPP
27
28 #include "memory/universe.hpp"
29 #include "runtime/abstract_vm_version.hpp"
30 #include "utilities/macros.hpp"
31
32 class VM_Version : public Abstract_VM_Version {
33 friend class VMStructs;
34 friend class JVMCIVMStructs;
35
36 public:
37 // cpuid result register layouts. These are all unions of a uint32_t
38 // (in case anyone wants access to the register as a whole) and a bitfield.
39
  // CPUID leaf 1, EAX: processor signature (stepping / model / family).
  // Bitfield layout must exactly match the hardware register layout.
  union StdCpuid1Eax {
    uint32_t value;
    struct {
      uint32_t stepping   : 4,
               model      : 4,
               family     : 4,
               proc_type  : 2,
                          : 2,
               ext_model  : 4,
               ext_family : 8,
                          : 4;
    } bits;
  };

  // CPUID leaf 1, EBX: brand id, CLFLUSH line size, logical processor count,
  // initial APIC id.
  union StdCpuid1Ebx { // example, unused
    uint32_t value;
    struct {
      uint32_t brand_id         : 8,
               clflush_size     : 8,
               threads_per_cpu  : 8,
               apic_id          : 8;
    } bits;
  };

  // CPUID leaf 1, ECX: SSE3/SSSE3/SSE4.x, AES, AVX, OSXSAVE, hypervisor
  // present bit, etc.
  union StdCpuid1Ecx {
    uint32_t value;
    struct {
      uint32_t sse3     : 1,
               clmul    : 1,
                        : 1,
               monitor  : 1,
                        : 1,
               vmx      : 1,
                        : 1,
               est      : 1,
                        : 1,
               ssse3    : 1,
               cid      : 1,
                        : 1,
               fma      : 1,
               cmpxchg16: 1,
                        : 4,
               dca      : 1,
               sse4_1   : 1,
               sse4_2   : 1,
                        : 2,
               popcnt   : 1,
                        : 1,
               aes      : 1,
                        : 1,
               osxsave  : 1,
               avx      : 1,
                        : 2,
               hv       : 1;
    } bits;
  };

  // CPUID leaf 1, EDX: legacy feature bits (TSC, CMPXCHG8B, CMOV, CLFLUSH,
  // MMX, FXSR, SSE/SSE2, hyper-threading).
  union StdCpuid1Edx {
    uint32_t value;
    struct {
      uint32_t          : 4,
               tsc      : 1,
                        : 3,
               cmpxchg8 : 1,
                        : 6,
               cmov     : 1,
                        : 3,
               clflush  : 1,
                        : 3,
               mmx      : 1,
               fxsr     : 1,
               sse      : 1,
               sse2     : 1,
                        : 1,
               ht       : 1,
                        : 3;
    } bits;
  };
118
  // CPUID leaf 4 (deterministic cache parameters), EAX: cache type plus
  // (cores per package - 1) in the top bits.
  union DcpCpuid4Eax {
    uint32_t value;
    struct {
      uint32_t cache_type    : 5,
                             : 21,
               cores_per_cpu : 6;
    } bits;
  };

  // CPUID leaf 4, EBX: cache geometry; stored values are (actual - 1).
  union DcpCpuid4Ebx {
    uint32_t value;
    struct {
      uint32_t L1_line_size  : 12,
               partitions    : 10,
               associativity : 10;
    } bits;
  };

  // CPUID leaf 0xB (extended topology), EBX: number of logical processors
  // at this topology level.
  union TplCpuidBEbx {
    uint32_t value;
    struct {
      uint32_t logical_cpus : 16,
                            : 16;
    } bits;
  };

  // CPUID leaf 0x80000001, ECX: AMD extended features (LZCNT, SSE4A,
  // PREFETCHW, ...).
  union ExtCpuid1Ecx {
    uint32_t value;
    struct {
      uint32_t LahfSahf    : 1,
               CmpLegacy   : 1,
                           : 3,
               lzcnt       : 1,
               sse4a       : 1,
               misalignsse : 1,
               prefetchw   : 1,
                           : 23;
    } bits;
  };

  // CPUID leaf 0x80000001, EDX: AMD extended features (3DNow!, long mode,
  // MMX/FXSR mirrors).
  union ExtCpuid1Edx {
    uint32_t value;
    struct {
      uint32_t           : 22,
               mmx_amd   : 1,
               mmx       : 1,
               fxsr      : 1,
                         : 4,
               long_mode : 1,
               tdnow2    : 1,
               tdnow     : 1;
    } bits;
  };

  // CPUID leaf 0x80000005 ECX/EDX (AMD): L1 cache descriptor bytes.
  union ExtCpuid5Ex {
    uint32_t value;
    struct {
      uint32_t L1_line_size : 8,
               L1_tag_lines : 8,
               L1_assoc     : 8,
               L1_size      : 8;
    } bits;
  };

  // CPUID leaf 0x80000007, EDX: invariant TSC support bit.
  union ExtCpuid7Edx {
    uint32_t value;
    struct {
      uint32_t               : 8,
               tsc_invariance : 1,
                              : 23;
    } bits;
  };

  // CPUID leaf 0x80000008, ECX: (cores per package - 1) on AMD.
  union ExtCpuid8Ecx {
    uint32_t value;
    struct {
      uint32_t cores_per_cpu : 8,
                             : 24;
    } bits;
  };
199
  // CPUID leaf 7 (structured extended features), EAX: maximum sub-leaf
  // number; no individual bits are consumed here.
  union SefCpuid7Eax {
    uint32_t value;
  };

  // CPUID leaf 7, EBX: BMI1/2, AVX2, ERMS, RTM, ADX, SHA, CLFLUSHOPT/CLWB
  // and the AVX-512 foundation/extension bits.
  union SefCpuid7Ebx {
    uint32_t value;
    struct {
      uint32_t fsgsbase   : 1,
                          : 2,
               bmi1       : 1,
                          : 1,
               avx2       : 1,
                          : 2,
               bmi2       : 1,
               erms       : 1,
                          : 1,
               rtm        : 1,
                          : 4,
               avx512f    : 1,
               avx512dq   : 1,
                          : 1,
               adx        : 1,
                          : 3,
               clflushopt : 1,
               clwb       : 1,
                          : 1,
               avx512pf   : 1,
               avx512er   : 1,
               avx512cd   : 1,
               sha        : 1,
               avx512bw   : 1,
               avx512vl   : 1;
    } bits;
  };

  // CPUID leaf 7, ECX: further AVX-512 extensions (VBMI/VBMI2, VNNI,
  // VPOPCNTDQ, VPCLMULQDQ) plus GFNI/VAES.
  union SefCpuid7Ecx {
    uint32_t value;
    struct {
      uint32_t prefetchwt1       : 1,
               avx512_vbmi       : 1,
               umip              : 1,
               pku               : 1,
               ospke             : 1,
                                 : 1,
               avx512_vbmi2      : 1,
                                 : 1,
               gfni              : 1,
               vaes              : 1,
               avx512_vpclmulqdq : 1,
               avx512_vnni       : 1,
               avx512_bitalg     : 1,
                                 : 1,
               avx512_vpopcntdq  : 1,
                                 : 17;
    } bits;
  };

  // CPUID leaf 7, EDX: AVX-512 4VNNIW/4FMAPS (Knights Mill) bits.
  union SefCpuid7Edx {
    uint32_t value;
    struct {
      uint32_t               : 2,
               avx512_4vnniw : 1,
               avx512_4fmaps : 1,
                             : 28;
    } bits;
  };

  // CPUID leaf 0x8000001E, EBX (AMD 17h+): (threads per core - 1).
  union ExtCpuid1EEbx {
    uint32_t value;
    struct {
      uint32_t                  : 8,
               threads_per_core : 8,
                                : 16;
    } bits;
  };

  // XCR0 (XFEATURE_ENABLED_MASK), EAX half: which register states the OS
  // has enabled for XSAVE/XRSTOR (SSE, YMM, AVX-512 opmask/zmm state).
  union XemXcr0Eax {
    uint32_t value;
    struct {
      uint32_t x87     : 1,
               sse     : 1,
               ymm     : 1,
               bndregs : 1,
               bndcsr  : 1,
               opmask  : 1,
               zmm512  : 1,
               zmm32   : 1,
                       : 24;
    } bits;
  };
290
protected:
  static int _cpu;       // CPU family, from cpuid leaf 1 EAX
  static int _model;     // CPU model
  static int _stepping;  // CPU stepping

  // True when this CPU is affected by the Intel JCC erratum (see
  // has_intel_jcc_erratum() below).
  static bool _has_intel_jcc_erratum;

  static address _cpuinfo_segv_addr; // address of instruction which causes SEGV
  static address _cpuinfo_cont_addr; // address of instruction after the one which causes SEGV
300
  // CPU feature flags. Each decl(NAME, string, bit) entry expands to a
  // CPU_NAME constant with value (1ULL << bit); the string form feeds the
  // feature-name table (_features_names).
  enum Feature_Flag : uint64_t {
#define CPU_FEATURE_FLAGS(decl) \
    decl(CX8,               "cx8",               0)  /* next bits are from cpuid 1 (EDX) */ \
    decl(CMOV,              "cmov",              1)  \
    decl(FXSR,              "fxsr",              2)  \
    decl(HT,                "ht",                3)  \
                                                     \
    decl(MMX,               "mmx",               4)  \
    decl(3DNOW_PREFETCH,    "3dnowpref",         5)  /* Processor supports 3dnow prefetch and prefetchw instructions */ \
                                                     /* may not necessarily support other 3dnow instructions */ \
    decl(SSE,               "sse",               6)  \
    decl(SSE2,              "sse2",              7)  \
                                                     \
    decl(SSE3,              "sse3",              8 ) /* SSE3 comes from cpuid 1 (ECX) */ \
    decl(SSSE3,             "ssse3",             9 ) \
    decl(SSE4A,             "sse4a",             10) \
    decl(SSE4_1,            "sse4.1",            11) \
                                                     \
    decl(SSE4_2,            "sse4.2",            12) \
    decl(POPCNT,            "popcnt",            13) \
    decl(LZCNT,             "lzcnt",             14) \
    decl(TSC,               "tsc",               15) \
                                                     \
    decl(TSCINV_BIT,        "tscinvbit",         16) \
    decl(TSCINV,            "tscinv",            17) \
    decl(AVX,               "avx",               18) \
    decl(AVX2,              "avx2",              19) \
                                                     \
    decl(AES,               "aes",               20) \
    decl(ERMS,              "erms",              21) /* enhanced 'rep movsb/stosb' instructions */ \
    decl(CLMUL,             "clmul",             22) /* carryless multiply for CRC */ \
    decl(BMI1,              "bmi1",              23) \
                                                     \
    decl(BMI2,              "bmi2",              24) \
    decl(RTM,               "rtm",               25) /* Restricted Transactional Memory instructions */ \
    decl(ADX,               "adx",               26) \
    decl(AVX512F,           "avx512f",           27) /* AVX 512bit foundation instructions */ \
                                                     \
    decl(AVX512DQ,          "avx512dq",          28) \
    decl(AVX512PF,          "avx512pf",          29) \
    decl(AVX512ER,          "avx512er",          30) \
    decl(AVX512CD,          "avx512cd",          31) \
                                                     \
    decl(AVX512BW,          "avx512bw",          32) /* Byte and word vector instructions */ \
    decl(AVX512VL,          "avx512vl",          33) /* EVEX instructions with smaller vector length */ \
    decl(SHA,               "sha",               34) /* SHA instructions */ \
    decl(FMA,               "fma",               35) /* FMA instructions */ \
                                                     \
    decl(VZEROUPPER,        "vzeroupper",        36) /* Vzeroupper instruction */ \
    decl(AVX512_VPOPCNTDQ,  "avx512_vpopcntdq",  37) /* Vector popcount */ \
    decl(AVX512_VPCLMULQDQ, "avx512_vpclmulqdq", 38) /* Vector carryless multiplication */ \
    decl(AVX512_VAES,       "avx512_vaes",       39) /* Vector AES instruction */ \
                                                     \
    decl(AVX512_VNNI,       "avx512_vnni",       40) /* Vector Neural Network Instructions */ \
    decl(FLUSH,             "clflush",           41) /* flush instruction */ \
    decl(FLUSHOPT,          "clflushopt",        42) /* clflushopt instruction */ \
    decl(CLWB,              "clwb",              43) /* clwb instruction */ \
                                                     \
    decl(AVX512_VBMI2,      "avx512_vbmi2",      44) /* VBMI2 shift left double instructions */ \
    decl(AVX512_VBMI,       "avx512_vbmi",       45) /* Vector BMI instructions */ \
    decl(HV,                "hv",                46) /* Hypervisor instructions */

#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (1ULL << bit),
    CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_FLAG)
#undef DECLARE_CPU_FEATURE_FLAG
  };

  // Printable names for the flags above, indexed by bit number.
  static const char* _features_names[];
369
  // Vendor family/model constants used by the predicates below. Model values
  // are "extended" models (ext_model << 4 | model), see extended_cpu_model().
  enum Extended_Family {
    // AMD
    CPU_FAMILY_AMD_11H       = 0x11,
    // ZX
    CPU_FAMILY_ZX_CORE_F6    = 6,
    CPU_FAMILY_ZX_CORE_F7    = 7,
    // Intel
    CPU_FAMILY_INTEL_CORE    = 6,
    CPU_MODEL_NEHALEM        = 0x1e,
    CPU_MODEL_NEHALEM_EP     = 0x1a,
    CPU_MODEL_NEHALEM_EX     = 0x2e,
    CPU_MODEL_WESTMERE       = 0x25,
    CPU_MODEL_WESTMERE_EP    = 0x2c,
    CPU_MODEL_WESTMERE_EX    = 0x2f,
    CPU_MODEL_SANDYBRIDGE    = 0x2a,
    CPU_MODEL_SANDYBRIDGE_EP = 0x2d,
    CPU_MODEL_IVYBRIDGE_EP   = 0x3a,
    CPU_MODEL_HASWELL_E3     = 0x3c,
    CPU_MODEL_HASWELL_E7     = 0x3f,
    CPU_MODEL_BROADWELL      = 0x3d,
    CPU_MODEL_SKYLAKE        = 0x55
  };
392
  // cpuid information block.  All info derived from executing cpuid with
  // various function numbers is stored here.  Intel and AMD info is
  // merged in this block: accessor methods disentangle it.
  //
  // The info block is laid out in subblocks of 4 dwords corresponding to
  // eax, ebx, ecx and edx, whether or not they contain anything useful.
  struct CpuidInfo {
    // cpuid function 0
    uint32_t std_max_function;
    uint32_t std_vendor_name_0;
    uint32_t std_vendor_name_1;
    uint32_t std_vendor_name_2;

    // cpuid function 1
    StdCpuid1Eax std_cpuid1_eax;
    StdCpuid1Ebx std_cpuid1_ebx;
    StdCpuid1Ecx std_cpuid1_ecx;
    StdCpuid1Edx std_cpuid1_edx;

    // cpuid function 4 (deterministic cache parameters)
    DcpCpuid4Eax dcp_cpuid4_eax;
    DcpCpuid4Ebx dcp_cpuid4_ebx;
    uint32_t     dcp_cpuid4_ecx; // unused currently
    uint32_t     dcp_cpuid4_edx; // unused currently

    // cpuid function 7 (structured extended features)
    SefCpuid7Eax sef_cpuid7_eax;
    SefCpuid7Ebx sef_cpuid7_ebx;
    SefCpuid7Ecx sef_cpuid7_ecx;
    SefCpuid7Edx sef_cpuid7_edx;

    // cpuid function 0xB (processor topology)
    // ecx = 0
    uint32_t     tpl_cpuidB0_eax;
    TplCpuidBEbx tpl_cpuidB0_ebx;
    uint32_t     tpl_cpuidB0_ecx; // unused currently
    uint32_t     tpl_cpuidB0_edx; // unused currently

    // ecx = 1
    uint32_t     tpl_cpuidB1_eax;
    TplCpuidBEbx tpl_cpuidB1_ebx;
    uint32_t     tpl_cpuidB1_ecx; // unused currently
    uint32_t     tpl_cpuidB1_edx; // unused currently

    // ecx = 2
    uint32_t     tpl_cpuidB2_eax;
    TplCpuidBEbx tpl_cpuidB2_ebx;
    uint32_t     tpl_cpuidB2_ecx; // unused currently
    uint32_t     tpl_cpuidB2_edx; // unused currently

    // cpuid function 0x80000000 // example, unused
    uint32_t ext_max_function;
    uint32_t ext_vendor_name_0;
    uint32_t ext_vendor_name_1;
    uint32_t ext_vendor_name_2;

    // cpuid function 0x80000001
    uint32_t     ext_cpuid1_eax; // reserved
    uint32_t     ext_cpuid1_ebx; // reserved
    ExtCpuid1Ecx ext_cpuid1_ecx;
    ExtCpuid1Edx ext_cpuid1_edx;

    // cpuid functions 0x80000002 thru 0x80000004: example, unused
    uint32_t proc_name_0, proc_name_1, proc_name_2, proc_name_3;
    uint32_t proc_name_4, proc_name_5, proc_name_6, proc_name_7;
    uint32_t proc_name_8, proc_name_9, proc_name_10,proc_name_11;

    // cpuid function 0x80000005 // AMD L1, Intel reserved
    uint32_t    ext_cpuid5_eax; // unused currently
    uint32_t    ext_cpuid5_ebx; // reserved
    ExtCpuid5Ex ext_cpuid5_ecx; // L1 data cache info (AMD)
    ExtCpuid5Ex ext_cpuid5_edx; // L1 instruction cache info (AMD)

    // cpuid function 0x80000007
    uint32_t     ext_cpuid7_eax; // reserved
    uint32_t     ext_cpuid7_ebx; // reserved
    uint32_t     ext_cpuid7_ecx; // reserved
    ExtCpuid7Edx ext_cpuid7_edx; // tscinv

    // cpuid function 0x80000008
    uint32_t     ext_cpuid8_eax; // unused currently
    uint32_t     ext_cpuid8_ebx; // reserved
    ExtCpuid8Ecx ext_cpuid8_ecx;
    uint32_t     ext_cpuid8_edx; // reserved

    // cpuid function 0x8000001E // AMD 17h
    uint32_t      ext_cpuid1E_eax;
    ExtCpuid1EEbx ext_cpuid1E_ebx; // threads per core (AMD17h)
    uint32_t      ext_cpuid1E_ecx;
    uint32_t      ext_cpuid1E_edx; // unused currently

    // extended control register XCR0 (the XFEATURE_ENABLED_MASK register)
    XemXcr0Eax xem_xcr0_eax;
    uint32_t   xem_xcr0_edx; // reserved

    // Space to save ymm registers after signal handling
    int ymm_save[8*4]; // Save ymm0, ymm7, ymm8, ymm15

    // Space to save zmm registers after signal handling
    int zmm_save[16*4]; // Save zmm0, zmm7, zmm8, zmm31
  };

  // The actual cpuid info block
  static CpuidInfo _cpuid_info;
497
498 // Extractors and predicates
499 static uint32_t extended_cpu_family() {
500 uint32_t result = _cpuid_info.std_cpuid1_eax.bits.family;
501 result += _cpuid_info.std_cpuid1_eax.bits.ext_family;
502 return result;
503 }
504
505 static uint32_t extended_cpu_model() {
506 uint32_t result = _cpuid_info.std_cpuid1_eax.bits.model;
507 result |= _cpuid_info.std_cpuid1_eax.bits.ext_model << 4;
508 return result;
509 }
510
511 static uint32_t cpu_stepping() {
512 uint32_t result = _cpuid_info.std_cpuid1_eax.bits.stepping;
513 return result;
514 }
515
516 static uint logical_processor_count() {
517 uint result = threads_per_core();
518 return result;
519 }
520
521 static bool compute_has_intel_jcc_erratum();
522
  // Derive the CPU_* feature-flag bit set from the raw cpuid data stored in
  // _cpuid_info: vendor-independent flags first, then AMD/Hygon-, Intel- and
  // ZX-specific flags, then composite flags (e.g. CPU_TSCINV).
  static uint64_t feature_flags() {
    uint64_t result = 0;
    if (_cpuid_info.std_cpuid1_edx.bits.cmpxchg8 != 0)
      result |= CPU_CX8;
    if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0)
      result |= CPU_CMOV;
    if (_cpuid_info.std_cpuid1_edx.bits.clflush != 0)
      result |= CPU_FLUSH;
#ifdef _LP64
    // clflush should always be available on x86_64
    // if not we are in real trouble because we rely on it
    // to flush the code cache.
    assert ((result & CPU_FLUSH) != 0, "clflush should be available");
#endif
    // AMD reports fxsr/mmx in the extended leaf as well; accept either.
    if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd_family() &&
        _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0))
      result |= CPU_FXSR;
    // HT flag is set for multi-core processors also.
    if (threads_per_core() > 1)
      result |= CPU_HT;
    if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd_family() &&
        _cpuid_info.ext_cpuid1_edx.bits.mmx != 0))
      result |= CPU_MMX;
    if (_cpuid_info.std_cpuid1_edx.bits.sse != 0)
      result |= CPU_SSE;
    if (_cpuid_info.std_cpuid1_edx.bits.sse2 != 0)
      result |= CPU_SSE2;
    if (_cpuid_info.std_cpuid1_ecx.bits.sse3 != 0)
      result |= CPU_SSE3;
    if (_cpuid_info.std_cpuid1_ecx.bits.ssse3 != 0)
      result |= CPU_SSSE3;
    if (_cpuid_info.std_cpuid1_ecx.bits.sse4_1 != 0)
      result |= CPU_SSE4_1;
    if (_cpuid_info.std_cpuid1_ecx.bits.sse4_2 != 0)
      result |= CPU_SSE4_2;
    if (_cpuid_info.std_cpuid1_ecx.bits.popcnt != 0)
      result |= CPU_POPCNT;
    // AVX (and AVX-512) require OS support: OSXSAVE must be on and XCR0 must
    // enable the corresponding register state, or the registers would not be
    // preserved across context switches.
    if (_cpuid_info.std_cpuid1_ecx.bits.avx != 0 &&
        _cpuid_info.std_cpuid1_ecx.bits.osxsave != 0 &&
        _cpuid_info.xem_xcr0_eax.bits.sse != 0 &&
        _cpuid_info.xem_xcr0_eax.bits.ymm != 0) {
      result |= CPU_AVX;
      result |= CPU_VZEROUPPER;
      if (_cpuid_info.sef_cpuid7_ebx.bits.avx2 != 0)
        result |= CPU_AVX2;
      if (_cpuid_info.sef_cpuid7_ebx.bits.avx512f != 0 &&
          _cpuid_info.xem_xcr0_eax.bits.opmask != 0 &&
          _cpuid_info.xem_xcr0_eax.bits.zmm512 != 0 &&
          _cpuid_info.xem_xcr0_eax.bits.zmm32 != 0) {
        result |= CPU_AVX512F;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512cd != 0)
          result |= CPU_AVX512CD;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512dq != 0)
          result |= CPU_AVX512DQ;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512pf != 0)
          result |= CPU_AVX512PF;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512er != 0)
          result |= CPU_AVX512ER;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512bw != 0)
          result |= CPU_AVX512BW;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512vl != 0)
          result |= CPU_AVX512VL;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
          result |= CPU_AVX512_VPOPCNTDQ;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpclmulqdq != 0)
          result |= CPU_AVX512_VPCLMULQDQ;
        if (_cpuid_info.sef_cpuid7_ecx.bits.vaes != 0)
          result |= CPU_AVX512_VAES;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vnni != 0)
          result |= CPU_AVX512_VNNI;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vbmi != 0)
          result |= CPU_AVX512_VBMI;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vbmi2 != 0)
          result |= CPU_AVX512_VBMI2;
      }
    }
    if (_cpuid_info.std_cpuid1_ecx.bits.hv != 0)
      result |= CPU_HV;
    if (_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
      result |= CPU_BMI1;
    if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
      result |= CPU_TSC;
    if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
      result |= CPU_TSCINV_BIT;
    if (_cpuid_info.std_cpuid1_ecx.bits.aes != 0)
      result |= CPU_AES;
    if (_cpuid_info.sef_cpuid7_ebx.bits.erms != 0)
      result |= CPU_ERMS;
    if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0)
      result |= CPU_CLMUL;
    if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0)
      result |= CPU_RTM;
    if (_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
      result |= CPU_ADX;
    if (_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
      result |= CPU_BMI2;
    if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
      result |= CPU_SHA;
    if (_cpuid_info.std_cpuid1_ecx.bits.fma != 0)
      result |= CPU_FMA;
    if (_cpuid_info.sef_cpuid7_ebx.bits.clflushopt != 0)
      result |= CPU_FLUSHOPT;

    // AMD|Hygon features.
    if (is_amd_family()) {
      if ((_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) ||
          (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0))
        result |= CPU_3DNOW_PREFETCH;
      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0)
        result |= CPU_LZCNT;
      if (_cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0)
        result |= CPU_SSE4A;
    }

    // Intel features.
    if (is_intel()) {
      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0) {
        result |= CPU_LZCNT;
      }
      if (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0) {
        result |= CPU_3DNOW_PREFETCH;
      }
      if (_cpuid_info.sef_cpuid7_ebx.bits.clwb != 0) {
        result |= CPU_CLWB;
      }
    }

    // ZX features.
    if (is_zx()) {
      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0) {
        result |= CPU_LZCNT;
      }
      if (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0) {
        result |= CPU_3DNOW_PREFETCH;
      }
    }

    // Composite features.
    if (supports_tscinv_bit() &&
        ((is_amd_family() && !is_amd_Barcelona()) ||
         is_intel_tsc_synched_at_init())) {
      result |= CPU_TSCINV;
    }

    return result;
  }
669
670 static bool os_supports_avx_vectors() {
671 bool retVal = false;
672 int nreg = 2 LP64_ONLY(+2);
673 if (supports_evex()) {
674 // Verify that OS save/restore all bits of EVEX registers
675 // during signal processing.
676 retVal = true;
677 for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
678 if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
679 retVal = false;
680 break;
681 }
682 }
683 } else if (supports_avx()) {
684 // Verify that OS save/restore all bits of AVX registers
685 // during signal processing.
686 retVal = true;
687 for (int i = 0; i < 8 * nreg; i++) { // 32 bytes per ymm register
688 if (_cpuid_info.ymm_save[i] != ymm_test_value()) {
689 retVal = false;
690 break;
691 }
692 }
693 // zmm_save will be set on a EVEX enabled machine even if we choose AVX code gen
694 if (retVal == false) {
695 // Verify that OS save/restore all bits of EVEX registers
696 // during signal processing.
697 retVal = true;
698 for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
699 if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
700 retVal = false;
701 break;
702 }
703 }
704 }
705 }
706 return retVal;
707 }
708
709 static void get_processor_features();
710
public:
  // Offsets for cpuid asm stub: each accessor yields the byte offset of the
  // corresponding 4-dword register subblock inside CpuidInfo, so the stub
  // can store cpuid results directly into _cpuid_info.
  static ByteSize std_cpuid0_offset() { return byte_offset_of(CpuidInfo, std_max_function); }
  static ByteSize std_cpuid1_offset() { return byte_offset_of(CpuidInfo, std_cpuid1_eax); }
  static ByteSize dcp_cpuid4_offset() { return byte_offset_of(CpuidInfo, dcp_cpuid4_eax); }
  static ByteSize sef_cpuid7_offset() { return byte_offset_of(CpuidInfo, sef_cpuid7_eax); }
  static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); }
  static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); }
  static ByteSize ext_cpuid7_offset() { return byte_offset_of(CpuidInfo, ext_cpuid7_eax); }
  static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); }
  static ByteSize ext_cpuid1E_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1E_eax); }
  static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); }
  static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
  static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
  static ByteSize xem_xcr0_offset() { return byte_offset_of(CpuidInfo, xem_xcr0_eax); }
  static ByteSize ymm_save_offset() { return byte_offset_of(CpuidInfo, ymm_save); }
  static ByteSize zmm_save_offset() { return byte_offset_of(CpuidInfo, zmm_save); }
728
729 // The value used to check ymm register after signal handle
730 static int ymm_test_value() { return 0xCAFEBABE; }
731
  static void get_cpu_info_wrapper();
  // Bookkeeping for the deliberate SEGV raised while probing vector-register
  // save/restore: record the faulting pc and the pc to resume at.
  static void set_cpuinfo_segv_addr(address pc) { _cpuinfo_segv_addr = pc; }
  static bool is_cpuinfo_segv_addr(address pc) { return _cpuinfo_segv_addr == pc; }
  static void set_cpuinfo_cont_addr(address pc) { _cpuinfo_cont_addr = pc; }
  static address cpuinfo_cont_addr() { return _cpuinfo_cont_addr; }

  // Force specific feature sets (used while generating the cpuid stubs).
  static void clean_cpuFeatures() { _features = 0; }
  static void set_avx_cpuFeatures() { _features = (CPU_SSE | CPU_SSE2 | CPU_AVX | CPU_VZEROUPPER ); }
  static void set_evex_cpuFeatures() { _features = (CPU_AVX512F | CPU_SSE | CPU_SSE2 | CPU_VZEROUPPER ); }
741
742
743 // Initialization
744 static void initialize();
745
746 // Override Abstract_VM_Version implementation
747 static void print_platform_virtualization_info(outputStream*);
748
749 // Override Abstract_VM_Version implementation
750 static bool use_biased_locking();
751
  // Asserts
  // Family is never 0 on real hardware, so a zero family field means the
  // cpuid info block has not been filled in yet.
  static void assert_is_initialized() {
    assert(_cpuid_info.std_cpuid1_eax.bits.family != 0, "VM_Version not initialized");
  }
756
  //
  // Processor family:
  //       3   -  386
  //       4   -  486
  //       5   -  Pentium
  //       6   -  PentiumPro, Pentium II, Celeron, Xeon, Pentium III, Athlon,
  //              Pentium M, Core Solo, Core Duo, Core2 Duo
  //    family 6 model:   9,        13,       14,        15
  //    0x0f   -  Pentium 4, Opteron
  //
  // Note: The cpu family should be used to select between
  //       instruction sequences which are valid on all Intel
  //       processors.  Use the feature test functions below to
  //       determine whether a particular instruction is supported.
  //
  // Vendor identity is decided by the first dword of the cpuid leaf 0
  // vendor string (little-endian character constants noted per line).
  static int  cpu_family()        { return _cpu;}
  static bool is_P6()             { return cpu_family() >= 6; }
  static bool is_amd()            { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
  static bool is_hygon()          { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x6F677948; } // 'ogyH'
  static bool is_amd_family()     { return is_amd() || is_hygon(); }
  static bool is_intel()          { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
  static bool is_zx()             { assert_is_initialized(); return (_cpuid_info.std_vendor_name_0 == 0x746e6543) || (_cpuid_info.std_vendor_name_0 == 0x68532020); } // 'tneC'||'hS  '
  static bool is_atom_family()    { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x36) || (extended_cpu_model() == 0x37) || (extended_cpu_model() == 0x4D))); } //Silvermont and Centerton
  static bool is_knights_family() { return UseKNLSetting || ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x57) || (extended_cpu_model() == 0x85))); } // Xeon Phi 3200/5200/7200 and Future Xeon Phi
781
  // True when cpuid leaf 0xB reports a usable topology level.
  static bool supports_processor_topology() {
    return (_cpuid_info.std_max_function >= 0xB) &&
           // eax[4:0] | ebx[0:15] == 0 indicates invalid topology level.
           // Some cpus have max cpuid >= 0xB but do not support processor topology.
           (((_cpuid_info.tpl_cpuidB0_eax & 0x1f) | _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus) != 0);
  }
788
789 static uint cores_per_cpu() {
790 uint result = 1;
791 if (is_intel()) {
792 bool supports_topology = supports_processor_topology();
793 if (supports_topology) {
794 result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
795 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
796 }
797 if (!supports_topology || result == 0) {
798 result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
799 }
800 } else if (is_amd_family()) {
801 result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
802 } else if (is_zx()) {
803 bool supports_topology = supports_processor_topology();
804 if (supports_topology) {
805 result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
806 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
807 }
808 if (!supports_topology || result == 0) {
809 result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
810 }
811 }
812 return result;
813 }
814
815 static uint threads_per_core() {
816 uint result = 1;
817 if (is_intel() && supports_processor_topology()) {
818 result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
819 } else if (is_zx() && supports_processor_topology()) {
820 result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
821 } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
822 if (cpu_family() >= 0x17) {
823 result = _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
824 } else {
825 result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
826 cores_per_cpu();
827 }
828 }
829 return (result == 0 ? 1 : result);
830 }
831
832 static intx L1_line_size() {
833 intx result = 0;
834 if (is_intel()) {
835 result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
836 } else if (is_amd_family()) {
837 result = _cpuid_info.ext_cpuid5_ecx.bits.L1_line_size;
838 } else if (is_zx()) {
839 result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
840 }
841 if (result < 32) // not defined ?
842 result = 32; // 32 bytes by default on x86 and other x64
843 return result;
844 }
845
846 static intx prefetch_data_size() {
847 return L1_line_size();
848 }
849
  //
  // Feature identification
  //
  // Each predicate tests the corresponding CPU_* bit in _features (populated
  // from feature_flags()); the combined avx512* variants express common
  // instruction-selection constraints.
  static bool supports_cpuid()        { return _features  != 0; }
  static bool supports_cmpxchg8()     { return (_features & CPU_CX8) != 0; }
  static bool supports_cmov()         { return (_features & CPU_CMOV) != 0; }
  static bool supports_fxsr()         { return (_features & CPU_FXSR) != 0; }
  static bool supports_ht()           { return (_features & CPU_HT) != 0; }
  static bool supports_mmx()          { return (_features & CPU_MMX) != 0; }
  static bool supports_sse()          { return (_features & CPU_SSE) != 0; }
  static bool supports_sse2()         { return (_features & CPU_SSE2) != 0; }
  static bool supports_sse3()         { return (_features & CPU_SSE3) != 0; }
  static bool supports_ssse3()        { return (_features & CPU_SSSE3)!= 0; }
  static bool supports_sse4_1()       { return (_features & CPU_SSE4_1) != 0; }
  static bool supports_sse4_2()       { return (_features & CPU_SSE4_2) != 0; }
  static bool supports_popcnt()       { return (_features & CPU_POPCNT) != 0; }
  static bool supports_avx()          { return (_features & CPU_AVX) != 0; }
  static bool supports_avx2()         { return (_features & CPU_AVX2) != 0; }
  static bool supports_tsc()          { return (_features & CPU_TSC) != 0; }
  static bool supports_aes()          { return (_features & CPU_AES) != 0; }
  static bool supports_erms()         { return (_features & CPU_ERMS) != 0; }
  static bool supports_clmul()        { return (_features & CPU_CLMUL) != 0; }
  static bool supports_rtm()          { return (_features & CPU_RTM) != 0; }
  static bool supports_bmi1()         { return (_features & CPU_BMI1) != 0; }
  static bool supports_bmi2()         { return (_features & CPU_BMI2) != 0; }
  static bool supports_adx()          { return (_features & CPU_ADX) != 0; }
  static bool supports_evex()         { return (_features & CPU_AVX512F) != 0; }
  static bool supports_avx512dq()     { return (_features & CPU_AVX512DQ) != 0; }
  static bool supports_avx512pf()     { return (_features & CPU_AVX512PF) != 0; }
  static bool supports_avx512er()     { return (_features & CPU_AVX512ER) != 0; }
  static bool supports_avx512cd()     { return (_features & CPU_AVX512CD) != 0; }
  static bool supports_avx512bw()     { return (_features & CPU_AVX512BW) != 0; }
  static bool supports_avx512vl()     { return (_features & CPU_AVX512VL) != 0; }
  static bool supports_avx512vlbw()   { return (supports_evex() && supports_avx512bw() && supports_avx512vl()); }
  static bool supports_avx512vldq()   { return (supports_evex() && supports_avx512dq() && supports_avx512vl()); }
  static bool supports_avx512vlbwdq() { return (supports_evex() && supports_avx512vl() &&
                                                supports_avx512bw() && supports_avx512dq()); }
  static bool supports_avx512novl()   { return (supports_evex() && !supports_avx512vl()); }
  static bool supports_avx512nobw()   { return (supports_evex() && !supports_avx512bw()); }
  static bool supports_avx256only()   { return (supports_avx2() && !supports_evex()); }
  static bool supports_avxonly()      { return ((supports_avx2() || supports_avx()) && !supports_evex()); }
  static bool supports_sha()          { return (_features & CPU_SHA) != 0; }
  // FMA additionally requires AVX since the instructions use VEX encoding.
  static bool supports_fma()          { return (_features & CPU_FMA) != 0 && supports_avx(); }
  static bool supports_vzeroupper()   { return (_features & CPU_VZEROUPPER) != 0; }
  static bool supports_avx512_vpopcntdq()  { return (_features & CPU_AVX512_VPOPCNTDQ) != 0; }
  static bool supports_avx512_vpclmulqdq() { return (_features & CPU_AVX512_VPCLMULQDQ) != 0; }
  static bool supports_avx512_vaes()  { return (_features & CPU_AVX512_VAES) != 0; }
  static bool supports_avx512_vnni()  { return (_features & CPU_AVX512_VNNI) != 0; }
  static bool supports_avx512_vbmi()  { return (_features & CPU_AVX512_VBMI) != 0; }
  static bool supports_avx512_vbmi2() { return (_features & CPU_AVX512_VBMI2) != 0; }
  static bool supports_hv()           { return (_features & CPU_HV) != 0; }
901
  // Intel features
  static bool is_intel_family_core() { return is_intel() &&
                                       extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }

  static bool is_intel_skylake() { return is_intel_family_core() &&
                                   extended_cpu_model() == CPU_MODEL_SKYLAKE; }
908
909 static bool is_intel_tsc_synched_at_init() {
910 if (is_intel_family_core()) {
911 uint32_t ext_model = extended_cpu_model();
912 if (ext_model == CPU_MODEL_NEHALEM_EP ||
913 ext_model == CPU_MODEL_WESTMERE_EP ||
914 ext_model == CPU_MODEL_SANDYBRIDGE_EP ||
915 ext_model == CPU_MODEL_IVYBRIDGE_EP) {
916 // <= 2-socket invariant tsc support. EX versions are usually used
917 // in > 2-socket systems and likely don't synchronize tscs at
918 // initialization.
919 // Code that uses tsc values must be prepared for them to arbitrarily
920 // jump forward or backward.
921 return true;
922 }
923 }
924 return false;
925 }
926
  // This checks if the JVM is potentially affected by an erratum on Intel CPUs (SKX102)
  // that causes unpredictable behaviour when jcc crosses 64 byte boundaries. Its microcode
  // mitigation causes regressions when jumps or fused conditional branches cross or end at
  // 32 byte boundaries.
  static bool has_intel_jcc_erratum() { return _has_intel_jcc_erratum; }

  // AMD features
  static bool supports_3dnow_prefetch()    { return (_features & CPU_3DNOW_PREFETCH) != 0; }
  static bool supports_lzcnt()    { return (_features & CPU_LZCNT) != 0; }
  static bool supports_sse4a()    { return (_features & CPU_SSE4A) != 0; }

  static bool is_amd_Barcelona()  { return is_amd() &&
                                    extended_cpu_family() == CPU_FAMILY_AMD_11H; }

  // Intel and AMD newer cores support fast timestamps well
  // TSCINV_BIT is the raw invariant-TSC cpuid bit; TSCINV is the composite
  // flag computed in feature_flags() (bit present AND trustworthy platform).
  static bool supports_tscinv_bit() {
    return (_features & CPU_TSCINV_BIT) != 0;
  }
  static bool supports_tscinv() {
    return (_features & CPU_TSCINV) != 0;
  }
948
949 // Intel Core and newer cpus have fast IDIV instruction (excluding Atom).
950 static bool has_fast_idiv() { return is_intel() && cpu_family() == 6 &&
951 supports_sse3() && _model != 0x1C; }
952
  // Compare-and-exchange is unconditionally available on every supported x86 CPU.
  static bool supports_compare_and_exchange() { return true; }
954
955 static intx allocate_prefetch_distance(bool use_watermark_prefetch) {
956 // Hardware prefetching (distance/size in bytes):
957 // Pentium 3 - 64 / 32
958 // Pentium 4 - 256 / 128
959 // Athlon - 64 / 32 ????
960 // Opteron - 128 / 64 only when 2 sequential cache lines accessed
961 // Core - 128 / 64
962 //
963 // Software prefetching (distance in bytes / instruction with best score):
964 // Pentium 3 - 128 / prefetchnta
965 // Pentium 4 - 512 / prefetchnta
966 // Athlon - 128 / prefetchnta
967 // Opteron - 256 / prefetchnta
968 // Core - 256 / prefetchnta
969 // It will be used only when AllocatePrefetchStyle > 0
970
971 if (is_amd_family()) { // AMD | Hygon
972 if (supports_sse2()) {
973 return 256; // Opteron
974 } else {
975 return 128; // Athlon
976 }
977 } else { // Intel
978 if (supports_sse3() && cpu_family() == 6) {
979 if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
980 return 192;
981 } else if (use_watermark_prefetch) { // watermark prefetching on Core
982 #ifdef _LP64
983 return 384;
984 #else
985 return 320;
986 #endif
987 }
988 }
989 if (supports_sse2()) {
990 if (cpu_family() == 6) {
991 return 256; // Pentium M, Core, Core2
992 } else {
993 return 512; // Pentium 4
994 }
995 } else {
996 return 128; // Pentium 3 (and all other old CPUs)
997 }
998 }
999 }
1000
  // SSE2 and later processors implement a 'pause' instruction
  // that can be used for efficient implementation of
  // the intrinsic for java.lang.Thread.onSpinWait().
  // Gated on SSE2 because 'pause' arrived with SSE2.
  static bool supports_on_spin_wait() { return supports_sse2(); }
1005
1006 // x86_64 supports fast class initialization checks for static methods.
1007 static bool supports_fast_class_init_checks() {
1008 return LP64_ONLY(true) NOT_LP64(false); // not implemented on x86_32
1009 }
1010
  // Stack-watermark barriers are supported unconditionally on x86;
  // constexpr so callers can branch on it at compile time.
  constexpr static bool supports_stack_watermark_barrier() {
    return true;
  }
1014
1015 // there are several insns to force cache line sync to memory which
1016 // we can use to ensure mapped non-volatile memory is up to date with
1017 // pending in-cache changes.
1018 //
  // 64 bit cpus always support clflush, which writes back and evicts;
  // on 32 bit cpus support is recorded via a feature flag.
1021 //
1022 // clflushopt is optional and acts like clflush except it does
1023 // not synchronize with other memory ops. it needs a preceding
1024 // and trailing StoreStore fence
1025 //
1026 // clwb is an optional intel-specific instruction which
1027 // writes back without evicting the line. it also does not
1028 // synchronize with other memory ops. so, it needs preceding
1029 // and trailing StoreStore fences.
1030
#ifdef _LP64
  // 64-bit: clflush is assumed present; the assert below only tolerates the
  // early-bootstrap window where _features is not yet populated.
  static bool supports_clflush() {
    // clflush should always be available on x86_64
    // if not we are in real trouble because we rely on it
    // to flush the code cache.
    // Unfortunately, Assembler::clflush is currently called as part
    // of generation of the code cache flush routine. This happens
    // under Universe::init before the processor features are set
    // up. Assembler::flush calls this routine to check that clflush
    // is allowed. So, we give the caller a free pass if Universe init
    // is still in progress.
    assert ((!Universe::is_fully_initialized() || (_features & CPU_FLUSH) != 0), "clflush should be available");
    return true;
  }
#else
  // 32-bit: clflush availability comes from the CPU_FLUSH feature bit.
  static bool supports_clflush() { return ((_features & CPU_FLUSH) != 0); }
#endif // _LP64
  // Note: CPU_FLUSHOPT and CPU_CLWB bits should always be zero for 32-bit
  // (see the fencing requirements for these instructions described above).
  static bool supports_clflushopt() { return ((_features & CPU_FLUSHOPT) != 0); }
  static bool supports_clwb() { return ((_features & CPU_CLWB) != 0); }
1051
1052 #ifdef __APPLE__
  // Is the CPU running emulated (for example macOS Rosetta running x86_64 code on M1 ARM (aarch64))?
1054 static bool is_cpu_emulated();
1055 #endif
1056
1057 // support functions for virtualization detection
1058 private:
1059 static void check_virtualizations();
1060 };
1061
1062 #endif // CPU_X86_VM_VERSION_X86_HPP
--- EOF ---