/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_VM_VERSION_X86_HPP
#define CPU_X86_VM_VERSION_X86_HPP

#include "runtime/abstract_vm_version.hpp"
#include "utilities/macros.hpp"
#include "utilities/sizes.hpp"

class VM_Version : public Abstract_VM_Version {
  friend class VMStructs;
  friend class JVMCIVMStructs;

 public:
  // cpuid result register layouts.  These are all unions of a uint32_t
  // (in case anyone wants access to the register as a whole) and a bitfield.

  union StdCpuid1Eax {
    uint32_t value;
    struct {
      uint32_t stepping   : 4,
               model      : 4,
               family     : 4,
               proc_type  : 2,
                          : 2,
               ext_model  : 4,
               ext_family : 8,
                          : 4;
    } bits;
  };

  union StdCpuid1Ebx { // example, unused
    uint32_t value;
    struct {
      uint32_t brand_id        : 8,
               clflush_size    : 8,
               threads_per_cpu : 8,
               apic_id         : 8;
    } bits;
  };

  union StdCpuid1Ecx {
    uint32_t value;
    struct {
      uint32_t sse3     : 1,
               clmul    : 1,
                        : 1,
               monitor  : 1,
                        : 1,
               vmx      : 1,
                        : 1,
               est      : 1,
                        : 1,
               ssse3    : 1,
               cid      : 1,
                        : 1,
               fma      : 1,
               cmpxchg16: 1,
                        : 4,
               dca      : 1,
               sse4_1   : 1,
               sse4_2   : 1,
                        : 2,
               popcnt   : 1,
                        : 1,
               aes      : 1,
                        : 1,
               osxsave  : 1,
               avx      : 1,
                        : 2,
               hv       : 1;
    } bits;
  };

  union StdCpuid1Edx {
    uint32_t value;
    struct {
      uint32_t          : 4,
               tsc      : 1,
                        : 3,
               cmpxchg8 : 1,
                        : 6,
               cmov     : 1,
                        : 3,
               clflush  : 1,
                        : 3,
               mmx      : 1,
               fxsr     : 1,
               sse      : 1,
               sse2     : 1,
                        : 1,
               ht       : 1,
                        : 3;
    } bits;
  };

  union DcpCpuid4Eax {
    uint32_t value;
    struct {
      uint32_t cache_type    : 5,
                             : 21,
               cores_per_cpu : 6;
    } bits;
  };

  union DcpCpuid4Ebx {
    uint32_t value;
    struct {
      uint32_t L1_line_size  : 12,
               partitions    : 10,
               associativity : 10;
    } bits;
  };

  union TplCpuidBEbx {
    uint32_t value;
    struct {
      uint32_t logical_cpus : 16,
                            : 16;
    } bits;
  };

  union ExtCpuid1Ecx {
    uint32_t value;
    struct {
      uint32_t LahfSahf    : 1,
               CmpLegacy   : 1,
                           : 3,
               lzcnt       : 1,
               sse4a       : 1,
               misalignsse : 1,
               prefetchw   : 1,
                           : 23;
    } bits;
  };

  union ExtCpuid1Edx {
    uint32_t value;
    struct {
      uint32_t           : 22,
               mmx_amd   : 1,
               mmx       : 1,
               fxsr      : 1,
                         : 4,
               long_mode : 1,
               tdnow2    : 1,
               tdnow     : 1;
    } bits;
  };

  union ExtCpuid5Ex {
    uint32_t value;
    struct {
      uint32_t L1_line_size : 8,
               L1_tag_lines : 8,
               L1_assoc     : 8,
               L1_size      : 8;
    } bits;
  };

  union ExtCpuid7Edx {
    uint32_t value;
    struct {
      uint32_t                : 8,
               tsc_invariance : 1,
                              : 23;
    } bits;
  };

  union ExtCpuid8Ecx {
    uint32_t value;
    struct {
      uint32_t cores_per_cpu : 8,
                             : 24;
    } bits;
  };

  union SefCpuid7Eax {
    uint32_t value;
  };

  union SefCpuid7Ebx {
    uint32_t value;
    struct {
      uint32_t fsgsbase   : 1,
                          : 2,
               bmi1       : 1,
                          : 1,
               avx2       : 1,
                          : 2,
               bmi2       : 1,
               erms       : 1,
                          : 1,
               rtm        : 1,
                          : 4,
               avx512f    : 1,
               avx512dq   : 1,
                          : 1,
               adx        : 1,
                          : 3,
               clflushopt : 1,
               clwb       : 1,
                          : 1,
               avx512pf   : 1,
               avx512er   : 1,
               avx512cd   : 1,
               sha        : 1,
               avx512bw   : 1,
               avx512vl   : 1;
    } bits;
  };

  union SefCpuid7Ecx {
    uint32_t value;
    struct {
      uint32_t prefetchwt1       : 1,
               avx512_vbmi       : 1,
               umip              : 1,
               pku               : 1,
               ospke             : 1,
                                 : 1,
               avx512_vbmi2      : 1,
                                 : 1,
               gfni              : 1,
               vaes              : 1,
               avx512_vpclmulqdq : 1,
               avx512_vnni       : 1,
               avx512_bitalg     : 1,
                                 : 1,
               avx512_vpopcntdq  : 1,
                                 : 17;
    } bits;
  };

  union SefCpuid7Edx {
    uint32_t value;
    struct {
      uint32_t               : 2,
               avx512_4vnniw : 1,
               avx512_4fmaps : 1,
                             : 10,
               serialize     : 1,
                             : 17;
    } bits;
  };

  union ExtCpuid1EEbx {
    uint32_t value;
    struct {
      uint32_t                  : 8,
               threads_per_core : 8,
                                : 16;
    } bits;
  };

  union XemXcr0Eax {
    uint32_t value;
    struct {
      uint32_t x87     : 1,
               sse     : 1,
               ymm     : 1,
               bndregs : 1,
               bndcsr  : 1,
               opmask  : 1,
               zmm512  : 1,
               zmm32   : 1,
                       : 24;
    } bits;
  };
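
  // For example, once the raw value of a cpuid register has been stored into
  // one of these unions, an individual feature bit can be read through the
  // bitfield view (illustrative sketch only; raw_ecx stands in for a value
  // produced by the cpuid stub):
  //
  //   StdCpuid1Ecx ecx;
  //   ecx.value = raw_ecx;                       // whole register
  //   bool has_sse42 = (ecx.bits.sse4_2 != 0);   // one decoded bit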

 protected:
  static int _cpu;
  static int _model;
  static int _stepping;

  static bool _has_intel_jcc_erratum;

  static address _cpuinfo_segv_addr; // address of instruction which causes SEGV
  static address _cpuinfo_cont_addr; // address of instruction after the one which causes SEGV

  enum Feature_Flag : uint64_t {
#define CPU_FEATURE_FLAGS(decl) \
    decl(CX8,               "cx8",               0)  /* next bits are from cpuid 1 (EDX) */ \
    decl(CMOV,              "cmov",              1)  \
    decl(FXSR,              "fxsr",              2)  \
    decl(HT,                "ht",                3)  \
                                                     \
    decl(MMX,               "mmx",               4)  \
    decl(3DNOW_PREFETCH,    "3dnowpref",         5)  /* Processor supports 3dnow prefetch and prefetchw instructions */ \
                                                     /* may not necessarily support other 3dnow instructions */ \
    decl(SSE,               "sse",               6)  \
    decl(SSE2,              "sse2",              7)  \
                                                     \
    decl(SSE3,              "sse3",              8)  /* SSE3 comes from cpuid 1 (ECX) */ \
    decl(SSSE3,             "ssse3",             9)  \
    decl(SSE4A,             "sse4a",             10) \
    decl(SSE4_1,            "sse4.1",            11) \
                                                     \
    decl(SSE4_2,            "sse4.2",            12) \
    decl(POPCNT,            "popcnt",            13) \
    decl(LZCNT,             "lzcnt",             14) \
    decl(TSC,               "tsc",               15) \
                                                     \
    decl(TSCINV_BIT,        "tscinvbit",         16) \
    decl(TSCINV,            "tscinv",            17) \
    decl(AVX,               "avx",               18) \
    decl(AVX2,              "avx2",              19) \
                                                     \
    decl(AES,               "aes",               20) \
    decl(ERMS,              "erms",              21) /* enhanced 'rep movsb/stosb' instructions */ \
    decl(CLMUL,             "clmul",             22) /* carryless multiply for CRC */ \
    decl(BMI1,              "bmi1",              23) \
                                                     \
    decl(BMI2,              "bmi2",              24) \
    decl(RTM,               "rtm",               25) /* Restricted Transactional Memory instructions */ \
    decl(ADX,               "adx",               26) \
    decl(AVX512F,           "avx512f",           27) /* AVX 512bit foundation instructions */ \
                                                     \
    decl(AVX512DQ,          "avx512dq",          28) \
    decl(AVX512PF,          "avx512pf",          29) \
    decl(AVX512ER,          "avx512er",          30) \
    decl(AVX512CD,          "avx512cd",          31) \
                                                     \
    decl(AVX512BW,          "avx512bw",          32) /* Byte and word vector instructions */ \
    decl(AVX512VL,          "avx512vl",          33) /* EVEX instructions with smaller vector length */ \
    decl(SHA,               "sha",               34) /* SHA instructions */ \
    decl(FMA,               "fma",               35) /* FMA instructions */ \
                                                     \
    decl(VZEROUPPER,        "vzeroupper",        36) /* Vzeroupper instruction */ \
    decl(AVX512_VPOPCNTDQ,  "avx512_vpopcntdq",  37) /* Vector popcount */ \
    decl(AVX512_VPCLMULQDQ, "avx512_vpclmulqdq", 38) /* Vector carryless multiplication */ \
    decl(AVX512_VAES,       "avx512_vaes",       39) /* Vector AES instruction */ \
                                                     \
    decl(AVX512_VNNI,       "avx512_vnni",       40) /* Vector Neural Network Instructions */ \
    decl(FLUSH,             "clflush",           41) /* clflush instruction */ \
    decl(FLUSHOPT,          "clflushopt",        42) /* clflushopt instruction */ \
    decl(CLWB,              "clwb",              43) /* clwb instruction */ \
                                                     \
    decl(AVX512_VBMI2,      "avx512_vbmi2",      44) /* VBMI2 shift left double instructions */ \
    decl(AVX512_VBMI,       "avx512_vbmi",       45) /* Vector BMI instructions */ \
    decl(HV,                "hv",                46) /* Hypervisor instructions */ \
    decl(SERIALIZE,         "serialize",         47) /* CPU SERIALIZE */

#define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (1ULL << bit),
    CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_FLAG)
#undef DECLARE_CPU_FEATURE_FLAG
  };
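
  // Each decl(id, name, bit) row in CPU_FEATURE_FLAGS is expanded by
  // DECLARE_CPU_FEATURE_FLAG into an enumerator occupying one bit of the
  // 64-bit feature word; for example the first row,
  //
  //   decl(CX8, "cx8", 0)   expands to   CPU_CX8 = (1ULL << 0),
  //
  // so a feature test is a simple mask check such as (_features & CPU_CX8) != 0.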
"avx512bw", 32) /* Byte and word vector instructions */ \ 347 decl(AVX512VL, "avx512vl", 33) /* EVEX instructions with smaller vector length */ \ 348 decl(SHA, "sha", 34) /* SHA instructions */ \ 349 decl(FMA, "fma", 35) /* FMA instructions */ \ 350 \ 351 decl(VZEROUPPER, "vzeroupper", 36) /* Vzeroupper instruction */ \ 352 decl(AVX512_VPOPCNTDQ, "avx512_vpopcntdq", 37) /* Vector popcount */ \ 353 decl(AVX512_VPCLMULQDQ, "avx512_vpclmulqdq", 38) /* Vector carryless multiplication */ \ 354 decl(AVX512_VAES, "avx512_vaes", 39) /* Vector AES instruction */ \ 355 \ 356 decl(AVX512_VNNI, "avx512_vnni", 40) /* Vector Neural Network Instructions */ \ 357 decl(FLUSH, "clflush", 41) /* flush instruction */ \ 358 decl(FLUSHOPT, "clflushopt", 42) /* flusopth instruction */ \ 359 decl(CLWB, "clwb", 43) /* clwb instruction */ \ 360 \ 361 decl(AVX512_VBMI2, "avx512_vbmi2", 44) /* VBMI2 shift left double instructions */ \ 362 decl(AVX512_VBMI, "avx512_vbmi", 45) /* Vector BMI instructions */ \ 363 decl(HV, "hv", 46) /* Hypervisor instructions */ \ 364 decl(SERIALIZE, "serialize", 47) /* CPU SERIALIZE */ 365 366 #define DECLARE_CPU_FEATURE_FLAG(id, name, bit) CPU_##id = (1ULL << bit), 367 CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_FLAG) 368 #undef DECLARE_CPU_FEATURE_FLAG 369 }; 370 371 static const char* _features_names[]; 372 373 enum Extended_Family { 374 // AMD 375 CPU_FAMILY_AMD_11H = 0x11, 376 // ZX 377 CPU_FAMILY_ZX_CORE_F6 = 6, 378 CPU_FAMILY_ZX_CORE_F7 = 7, 379 // Intel 380 CPU_FAMILY_INTEL_CORE = 6, 381 CPU_MODEL_NEHALEM = 0x1e, 382 CPU_MODEL_NEHALEM_EP = 0x1a, 383 CPU_MODEL_NEHALEM_EX = 0x2e, 384 CPU_MODEL_WESTMERE = 0x25, 385 CPU_MODEL_WESTMERE_EP = 0x2c, 386 CPU_MODEL_WESTMERE_EX = 0x2f, 387 CPU_MODEL_SANDYBRIDGE = 0x2a, 388 CPU_MODEL_SANDYBRIDGE_EP = 0x2d, 389 CPU_MODEL_IVYBRIDGE_EP = 0x3a, 390 CPU_MODEL_HASWELL_E3 = 0x3c, 391 CPU_MODEL_HASWELL_E7 = 0x3f, 392 CPU_MODEL_BROADWELL = 0x3d, 393 CPU_MODEL_SKYLAKE = 0x55 394 }; 395 396 // cpuid information block. All info derived from executing cpuid with 397 // various function numbers is stored here. Intel and AMD info is 398 // merged in this block: accessor methods disentangle it. 399 // 400 // The info block is laid out in subblocks of 4 dwords corresponding to 401 // eax, ebx, ecx and edx, whether or not they contain anything useful. 
  struct CpuidInfo {
    // cpuid function 0
    uint32_t std_max_function;
    uint32_t std_vendor_name_0;
    uint32_t std_vendor_name_1;
    uint32_t std_vendor_name_2;

    // cpuid function 1
    StdCpuid1Eax std_cpuid1_eax;
    StdCpuid1Ebx std_cpuid1_ebx;
    StdCpuid1Ecx std_cpuid1_ecx;
    StdCpuid1Edx std_cpuid1_edx;

    // cpuid function 4 (deterministic cache parameters)
    DcpCpuid4Eax dcp_cpuid4_eax;
    DcpCpuid4Ebx dcp_cpuid4_ebx;
    uint32_t     dcp_cpuid4_ecx; // unused currently
    uint32_t     dcp_cpuid4_edx; // unused currently

    // cpuid function 7 (structured extended features)
    SefCpuid7Eax sef_cpuid7_eax;
    SefCpuid7Ebx sef_cpuid7_ebx;
    SefCpuid7Ecx sef_cpuid7_ecx;
    SefCpuid7Edx sef_cpuid7_edx;

    // cpuid function 0xB (processor topology)
    // ecx = 0
    uint32_t     tpl_cpuidB0_eax;
    TplCpuidBEbx tpl_cpuidB0_ebx;
    uint32_t     tpl_cpuidB0_ecx; // unused currently
    uint32_t     tpl_cpuidB0_edx; // unused currently

    // ecx = 1
    uint32_t     tpl_cpuidB1_eax;
    TplCpuidBEbx tpl_cpuidB1_ebx;
    uint32_t     tpl_cpuidB1_ecx; // unused currently
    uint32_t     tpl_cpuidB1_edx; // unused currently

    // ecx = 2
    uint32_t     tpl_cpuidB2_eax;
    TplCpuidBEbx tpl_cpuidB2_ebx;
    uint32_t     tpl_cpuidB2_ecx; // unused currently
    uint32_t     tpl_cpuidB2_edx; // unused currently

    // cpuid function 0x80000000 // example, unused
    uint32_t ext_max_function;
    uint32_t ext_vendor_name_0;
    uint32_t ext_vendor_name_1;
    uint32_t ext_vendor_name_2;

    // cpuid function 0x80000001
    uint32_t     ext_cpuid1_eax; // reserved
    uint32_t     ext_cpuid1_ebx; // reserved
    ExtCpuid1Ecx ext_cpuid1_ecx;
    ExtCpuid1Edx ext_cpuid1_edx;

    // cpuid functions 0x80000002 thru 0x80000004: example, unused
    uint32_t proc_name_0, proc_name_1, proc_name_2, proc_name_3;
    uint32_t proc_name_4, proc_name_5, proc_name_6, proc_name_7;
    uint32_t proc_name_8, proc_name_9, proc_name_10, proc_name_11;

    // cpuid function 0x80000005 // AMD L1, Intel reserved
    uint32_t    ext_cpuid5_eax; // unused currently
    uint32_t    ext_cpuid5_ebx; // reserved
    ExtCpuid5Ex ext_cpuid5_ecx; // L1 data cache info (AMD)
    ExtCpuid5Ex ext_cpuid5_edx; // L1 instruction cache info (AMD)

    // cpuid function 0x80000007
    uint32_t     ext_cpuid7_eax; // reserved
    uint32_t     ext_cpuid7_ebx; // reserved
    uint32_t     ext_cpuid7_ecx; // reserved
    ExtCpuid7Edx ext_cpuid7_edx; // tscinv

    // cpuid function 0x80000008
    uint32_t     ext_cpuid8_eax; // unused currently
    uint32_t     ext_cpuid8_ebx; // reserved
    ExtCpuid8Ecx ext_cpuid8_ecx;
    uint32_t     ext_cpuid8_edx; // reserved

    // cpuid function 0x8000001E // AMD 17h
    uint32_t      ext_cpuid1E_eax;
    ExtCpuid1EEbx ext_cpuid1E_ebx; // threads per core (AMD 17h)
    uint32_t      ext_cpuid1E_ecx;
    uint32_t      ext_cpuid1E_edx; // unused currently

    // extended control register XCR0 (the XFEATURE_ENABLED_MASK register)
    XemXcr0Eax xem_xcr0_eax;
    uint32_t   xem_xcr0_edx; // reserved

    // Space to save ymm registers after signal handling
    int ymm_save[8*4]; // Save ymm0, ymm7, ymm8, ymm15

    // Space to save zmm registers after signal handling
    int zmm_save[16*4]; // Save zmm0, zmm7, zmm8, zmm31
  };

  // The actual cpuid info block
  static CpuidInfo _cpuid_info;

  // Extractors and predicates
  static uint32_t extended_cpu_family() {
    uint32_t result = _cpuid_info.std_cpuid1_eax.bits.family;
    result += _cpuid_info.std_cpuid1_eax.bits.ext_family;
    return result;
  }
  static uint32_t extended_cpu_model() {
    uint32_t result = _cpuid_info.std_cpuid1_eax.bits.model;
    result |= _cpuid_info.std_cpuid1_eax.bits.ext_model << 4;
    return result;
  }

  static uint32_t cpu_stepping() {
    uint32_t result = _cpuid_info.std_cpuid1_eax.bits.stepping;
    return result;
  }

  static uint logical_processor_count() {
    uint result = threads_per_core();
    return result;
  }

  static bool compute_has_intel_jcc_erratum();

  static uint64_t feature_flags() {
    uint64_t result = 0;
    if (_cpuid_info.std_cpuid1_edx.bits.cmpxchg8 != 0)
      result |= CPU_CX8;
    if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0)
      result |= CPU_CMOV;
    if (_cpuid_info.std_cpuid1_edx.bits.clflush != 0)
      result |= CPU_FLUSH;
#ifdef _LP64
    // clflush should always be available on x86_64
    // if not we are in real trouble because we rely on it
    // to flush the code cache.
    assert ((result & CPU_FLUSH) != 0, "clflush should be available");
#endif
    if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd_family() &&
        _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0))
      result |= CPU_FXSR;
    // HT flag is set for multi-core processors also.
    if (threads_per_core() > 1)
      result |= CPU_HT;
    if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd_family() &&
        _cpuid_info.ext_cpuid1_edx.bits.mmx != 0))
      result |= CPU_MMX;
    if (_cpuid_info.std_cpuid1_edx.bits.sse != 0)
      result |= CPU_SSE;
    if (_cpuid_info.std_cpuid1_edx.bits.sse2 != 0)
      result |= CPU_SSE2;
    if (_cpuid_info.std_cpuid1_ecx.bits.sse3 != 0)
      result |= CPU_SSE3;
    if (_cpuid_info.std_cpuid1_ecx.bits.ssse3 != 0)
      result |= CPU_SSSE3;
    if (_cpuid_info.std_cpuid1_ecx.bits.sse4_1 != 0)
      result |= CPU_SSE4_1;
    if (_cpuid_info.std_cpuid1_ecx.bits.sse4_2 != 0)
      result |= CPU_SSE4_2;
    if (_cpuid_info.std_cpuid1_ecx.bits.popcnt != 0)
      result |= CPU_POPCNT;
    if (_cpuid_info.std_cpuid1_ecx.bits.avx != 0 &&
        _cpuid_info.std_cpuid1_ecx.bits.osxsave != 0 &&
        _cpuid_info.xem_xcr0_eax.bits.sse != 0 &&
        _cpuid_info.xem_xcr0_eax.bits.ymm != 0) {
      result |= CPU_AVX;
      result |= CPU_VZEROUPPER;
      if (_cpuid_info.sef_cpuid7_ebx.bits.avx2 != 0)
        result |= CPU_AVX2;
      if (_cpuid_info.sef_cpuid7_ebx.bits.avx512f != 0 &&
          _cpuid_info.xem_xcr0_eax.bits.opmask != 0 &&
          _cpuid_info.xem_xcr0_eax.bits.zmm512 != 0 &&
          _cpuid_info.xem_xcr0_eax.bits.zmm32 != 0) {
        result |= CPU_AVX512F;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512cd != 0)
          result |= CPU_AVX512CD;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512dq != 0)
          result |= CPU_AVX512DQ;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512pf != 0)
          result |= CPU_AVX512PF;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512er != 0)
          result |= CPU_AVX512ER;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512bw != 0)
          result |= CPU_AVX512BW;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512vl != 0)
          result |= CPU_AVX512VL;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
          result |= CPU_AVX512_VPOPCNTDQ;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpclmulqdq != 0)
          result |= CPU_AVX512_VPCLMULQDQ;
        if (_cpuid_info.sef_cpuid7_ecx.bits.vaes != 0)
          result |= CPU_AVX512_VAES;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vnni != 0)
          result |= CPU_AVX512_VNNI;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vbmi != 0)
          result |= CPU_AVX512_VBMI;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vbmi2 != 0)
          result |= CPU_AVX512_VBMI2;
      }
    }
    if (_cpuid_info.std_cpuid1_ecx.bits.hv != 0)
      result |= CPU_HV;
    if (_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
      result |= CPU_BMI1;
    if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
      result |= CPU_TSC;
    if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
      result |= CPU_TSCINV_BIT;
    if (_cpuid_info.std_cpuid1_ecx.bits.aes != 0)
      result |= CPU_AES;
    if (_cpuid_info.sef_cpuid7_ebx.bits.erms != 0)
      result |= CPU_ERMS;
    if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0)
      result |= CPU_CLMUL;
    if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0)
      result |= CPU_RTM;
    if (_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
      result |= CPU_ADX;
    if (_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
      result |= CPU_BMI2;
    if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
      result |= CPU_SHA;
    if (_cpuid_info.std_cpuid1_ecx.bits.fma != 0)
      result |= CPU_FMA;
    if (_cpuid_info.sef_cpuid7_ebx.bits.clflushopt != 0)
      result |= CPU_FLUSHOPT;

    // AMD|Hygon features.
    if (is_amd_family()) {
      if ((_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) ||
          (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0))
        result |= CPU_3DNOW_PREFETCH;
      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0)
        result |= CPU_LZCNT;
      if (_cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0)
        result |= CPU_SSE4A;
    }

    // Intel features.
    if (is_intel()) {
      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0) {
        result |= CPU_LZCNT;
      }
      if (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0) {
        result |= CPU_3DNOW_PREFETCH;
      }
      if (_cpuid_info.sef_cpuid7_ebx.bits.clwb != 0) {
        result |= CPU_CLWB;
      }
      if (_cpuid_info.sef_cpuid7_edx.bits.serialize != 0)
        result |= CPU_SERIALIZE;
    }

    // ZX features.
    if (is_zx()) {
      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0) {
        result |= CPU_LZCNT;
      }
      if (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0) {
        result |= CPU_3DNOW_PREFETCH;
      }
    }

    // Composite features.
    if (supports_tscinv_bit() &&
        ((is_amd_family() && !is_amd_Barcelona()) ||
         is_intel_tsc_synched_at_init())) {
      result |= CPU_TSCINV;
    }

    return result;
  }

  static bool os_supports_avx_vectors() {
    bool retVal = false;
    int nreg = 2 LP64_ONLY(+2);
    if (supports_evex()) {
      // Verify that the OS saves/restores all bits of the EVEX registers
      // during signal processing.
      retVal = true;
      for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
        if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
          retVal = false;
          break;
        }
      }
    } else if (supports_avx()) {
      // Verify that the OS saves/restores all bits of the AVX registers
      // during signal processing.
      retVal = true;
      for (int i = 0; i < 8 * nreg; i++) { // 32 bytes per ymm register
        if (_cpuid_info.ymm_save[i] != ymm_test_value()) {
          retVal = false;
          break;
        }
      }
      // zmm_save will be set on an EVEX-enabled machine even if we choose AVX code gen
      if (retVal == false) {
        // Verify that the OS saves/restores all bits of the EVEX registers
        // during signal processing.
        retVal = true;
        for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
          if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
            retVal = false;
            break;
          }
        }
      }
    }
    return retVal;
  }

  static void get_processor_features();

 public:
  // Offsets for cpuid asm stub
  static ByteSize std_cpuid0_offset() { return byte_offset_of(CpuidInfo, std_max_function); }
  static ByteSize std_cpuid1_offset() { return byte_offset_of(CpuidInfo, std_cpuid1_eax); }
  static ByteSize dcp_cpuid4_offset() { return byte_offset_of(CpuidInfo, dcp_cpuid4_eax); }
  static ByteSize sef_cpuid7_offset() { return byte_offset_of(CpuidInfo, sef_cpuid7_eax); }
  static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); }
  static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); }
  static ByteSize ext_cpuid7_offset() { return byte_offset_of(CpuidInfo, ext_cpuid7_eax); }
  static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); }
  static ByteSize ext_cpuid1E_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1E_eax); }
  static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); }
  static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
  static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
  static ByteSize xem_xcr0_offset() { return byte_offset_of(CpuidInfo, xem_xcr0_eax); }
  static ByteSize ymm_save_offset() { return byte_offset_of(CpuidInfo, ymm_save); }
  static ByteSize zmm_save_offset() { return byte_offset_of(CpuidInfo, zmm_save); }

  // The value used to check the ymm registers after signal handling
  static int ymm_test_value() { return 0xCAFEBABE; }

  static void get_cpu_info_wrapper();
  static void set_cpuinfo_segv_addr(address pc) { _cpuinfo_segv_addr = pc; }
  static bool is_cpuinfo_segv_addr(address pc) { return _cpuinfo_segv_addr == pc; }
  static void set_cpuinfo_cont_addr(address pc) { _cpuinfo_cont_addr = pc; }
  static address cpuinfo_cont_addr() { return _cpuinfo_cont_addr; }

  static void clean_cpuFeatures() { _features = 0; }
  static void set_avx_cpuFeatures() { _features = (CPU_SSE | CPU_SSE2 | CPU_AVX | CPU_VZEROUPPER); }
  static void set_evex_cpuFeatures() { _features = (CPU_AVX512F | CPU_SSE | CPU_SSE2 | CPU_VZEROUPPER); }

  // Initialization
  static void initialize();

  // Override Abstract_VM_Version implementation
  static void print_platform_virtualization_info(outputStream*);

  // Asserts
  static void assert_is_initialized() {
    assert(_cpuid_info.std_cpuid1_eax.bits.family != 0, "VM_Version not initialized");
  }

  //
  // Processor family:
  //    3   - 386
  //    4   - 486
  //    5   - Pentium
  //    6   - PentiumPro, Pentium II, Celeron, Xeon, Pentium III, Athlon,
  //          Pentium M, Core Solo, Core Duo, Core2 Duo
  //          family 6 model: 9, 13, 14, 15
  //    0x0f - Pentium 4, Opteron
  //
  // Note: The cpu family should be used to select between
  //       instruction sequences which are valid on all Intel
  //       processors.  Use the feature test functions below to
  //       determine whether a particular instruction is supported.
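  //
  //       For example (illustrative), prefer
  //
  //         if (supports_sse4_2()) { /* emit an SSE4.2 sequence */ }
  //
  //       over checking cpu_family() or extended_cpu_model() directly when the
  //       real question is whether one instruction set extension is present.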
  //
  static int cpu_family() { return _cpu; }
  static bool is_P6() { return cpu_family() >= 6; }
  static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
  static bool is_hygon() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x6F677948; } // 'ogyH'
  static bool is_amd_family() { return is_amd() || is_hygon(); }
  static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
  static bool is_zx() { assert_is_initialized(); return (_cpuid_info.std_vendor_name_0 == 0x746e6543) || (_cpuid_info.std_vendor_name_0 == 0x68532020); } // 'tneC'||'hS '
  static bool is_atom_family() { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x36) || (extended_cpu_model() == 0x37) || (extended_cpu_model() == 0x4D))); } // Silvermont and Centerton
  static bool is_knights_family() { return UseKNLSetting || ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x57) || (extended_cpu_model() == 0x85))); } // Xeon Phi 3200/5200/7200 and Future Xeon Phi

  static bool supports_processor_topology() {
    return (_cpuid_info.std_max_function >= 0xB) &&
           // eax[4:0] | ebx[0:15] == 0 indicates invalid topology level.
           // Some cpus have max cpuid >= 0xB but do not support processor topology.
           (((_cpuid_info.tpl_cpuidB0_eax & 0x1f) | _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus) != 0);
  }

  static uint cores_per_cpu() {
    uint result = 1;
    if (is_intel()) {
      bool supports_topology = supports_processor_topology();
      if (supports_topology) {
        result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
                 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
      }
      if (!supports_topology || result == 0) {
        result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
      }
    } else if (is_amd_family()) {
      result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
    } else if (is_zx()) {
      bool supports_topology = supports_processor_topology();
      if (supports_topology) {
        result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
                 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
      }
      if (!supports_topology || result == 0) {
        result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
      }
    }
    return result;
  }

  static uint threads_per_core() {
    uint result = 1;
    if (is_intel() && supports_processor_topology()) {
      result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
    } else if (is_zx() && supports_processor_topology()) {
      result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
    } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
      if (cpu_family() >= 0x17) {
        result = _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
      } else {
        result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
                 cores_per_cpu();
      }
    }
    return (result == 0 ? 1 : result);
  }

  static intx L1_line_size() {
    intx result = 0;
    if (is_intel()) {
      result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
    } else if (is_amd_family()) {
      result = _cpuid_info.ext_cpuid5_ecx.bits.L1_line_size;
    } else if (is_zx()) {
      result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
    }
    if (result < 32) // not defined?
      result = 32; // 32 bytes by default on x86 and other x64
    return result;
  }

  static intx prefetch_data_size() {
    return L1_line_size();
  }

  //
  // Feature identification
  //
  static bool supports_cpuid() { return _features != 0; }
  static bool supports_cmpxchg8() { return (_features & CPU_CX8) != 0; }
  static bool supports_cmov() { return (_features & CPU_CMOV) != 0; }
  static bool supports_fxsr() { return (_features & CPU_FXSR) != 0; }
  static bool supports_ht() { return (_features & CPU_HT) != 0; }
  static bool supports_mmx() { return (_features & CPU_MMX) != 0; }
  static bool supports_sse() { return (_features & CPU_SSE) != 0; }
  static bool supports_sse2() { return (_features & CPU_SSE2) != 0; }
  static bool supports_sse3() { return (_features & CPU_SSE3) != 0; }
  static bool supports_ssse3() { return (_features & CPU_SSSE3) != 0; }
  static bool supports_sse4_1() { return (_features & CPU_SSE4_1) != 0; }
  static bool supports_sse4_2() { return (_features & CPU_SSE4_2) != 0; }
  static bool supports_popcnt() { return (_features & CPU_POPCNT) != 0; }
  static bool supports_avx() { return (_features & CPU_AVX) != 0; }
  static bool supports_avx2() { return (_features & CPU_AVX2) != 0; }
  static bool supports_tsc() { return (_features & CPU_TSC) != 0; }
  static bool supports_aes() { return (_features & CPU_AES) != 0; }
  static bool supports_erms() { return (_features & CPU_ERMS) != 0; }
  static bool supports_clmul() { return (_features & CPU_CLMUL) != 0; }
  static bool supports_rtm() { return (_features & CPU_RTM) != 0; }
  static bool supports_bmi1() { return (_features & CPU_BMI1) != 0; }
  static bool supports_bmi2() { return (_features & CPU_BMI2) != 0; }
  static bool supports_adx() { return (_features & CPU_ADX) != 0; }
  static bool supports_evex() { return (_features & CPU_AVX512F) != 0; }
  static bool supports_avx512dq() { return (_features & CPU_AVX512DQ) != 0; }
  static bool supports_avx512pf() { return (_features & CPU_AVX512PF) != 0; }
  static bool supports_avx512er() { return (_features & CPU_AVX512ER) != 0; }
  static bool supports_avx512cd() { return (_features & CPU_AVX512CD) != 0; }
  static bool supports_avx512bw() { return (_features & CPU_AVX512BW) != 0; }
  static bool supports_avx512vl() { return (_features & CPU_AVX512VL) != 0; }
  static bool supports_avx512vlbw() { return (supports_evex() && supports_avx512bw() && supports_avx512vl()); }
  static bool supports_avx512bwdq() { return (supports_evex() && supports_avx512bw() && supports_avx512dq()); }
  static bool supports_avx512vldq() { return (supports_evex() && supports_avx512dq() && supports_avx512vl()); }
  static bool supports_avx512vlbwdq() { return (supports_evex() && supports_avx512vl() &&
                                                supports_avx512bw() && supports_avx512dq()); }
  static bool supports_avx512novl() { return (supports_evex() && !supports_avx512vl()); }
  static bool supports_avx512nobw() { return (supports_evex() && !supports_avx512bw()); }
  static bool supports_avx256only() { return (supports_avx2() && !supports_evex()); }
  static bool supports_avxonly() { return ((supports_avx2() || supports_avx()) && !supports_evex()); }
  static bool supports_sha() { return (_features & CPU_SHA) != 0; }
  static bool supports_fma() { return (_features & CPU_FMA) != 0 && supports_avx(); }
  static bool supports_vzeroupper() { return (_features & CPU_VZEROUPPER) != 0; }
  static bool supports_avx512_vpopcntdq() { return (_features & CPU_AVX512_VPOPCNTDQ) != 0; }
  static bool supports_avx512_vpclmulqdq() { return (_features & CPU_AVX512_VPCLMULQDQ) != 0; }
  static bool supports_avx512_vaes() { return (_features & CPU_AVX512_VAES) != 0; }
  static bool supports_avx512_vnni() { return (_features & CPU_AVX512_VNNI) != 0; }
  static bool supports_avx512_vbmi() { return (_features & CPU_AVX512_VBMI) != 0; }
  static bool supports_avx512_vbmi2() { return (_features & CPU_AVX512_VBMI2) != 0; }
  static bool supports_hv() { return (_features & CPU_HV) != 0; }
  static bool supports_serialize() { return (_features & CPU_SERIALIZE) != 0; }

  // Intel features
  static bool is_intel_family_core() { return is_intel() &&
                                              extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }

  static bool is_intel_skylake() { return is_intel_family_core() &&
                                          extended_cpu_model() == CPU_MODEL_SKYLAKE; }

  static int avx3_threshold();

  static bool is_intel_tsc_synched_at_init() {
    if (is_intel_family_core()) {
      uint32_t ext_model = extended_cpu_model();
      if (ext_model == CPU_MODEL_NEHALEM_EP     ||
          ext_model == CPU_MODEL_WESTMERE_EP    ||
          ext_model == CPU_MODEL_SANDYBRIDGE_EP ||
          ext_model == CPU_MODEL_IVYBRIDGE_EP) {
        // <= 2-socket invariant tsc support. EX versions are usually used
        // in > 2-socket systems and likely don't synchronize tscs at
        // initialization.
        // Code that uses tsc values must be prepared for them to arbitrarily
        // jump forward or backward.
        return true;
      }
    }
    return false;
  }

  // This checks if the JVM is potentially affected by an erratum on Intel CPUs (SKX102)
  // that causes unpredictable behaviour when jcc crosses 64 byte boundaries. Its microcode
  // mitigation causes regressions when jumps or fused conditional branches cross or end at
  // 32 byte boundaries.
  static bool has_intel_jcc_erratum() { return _has_intel_jcc_erratum; }

  // AMD features
  static bool supports_3dnow_prefetch() { return (_features & CPU_3DNOW_PREFETCH) != 0; }
  static bool supports_lzcnt() { return (_features & CPU_LZCNT) != 0; }
  static bool supports_sse4a() { return (_features & CPU_SSE4A) != 0; }

  static bool is_amd_Barcelona() { return is_amd() &&
                                          extended_cpu_family() == CPU_FAMILY_AMD_11H; }

  // Intel and AMD newer cores support fast timestamps well
  static bool supports_tscinv_bit() {
    return (_features & CPU_TSCINV_BIT) != 0;
  }
  static bool supports_tscinv() {
    return (_features & CPU_TSCINV) != 0;
  }

  // Intel Core and newer cpus have fast IDIV instruction (excluding Atom).
  static bool has_fast_idiv() { return is_intel() && cpu_family() == 6 &&
                                       supports_sse3() && _model != 0x1C; }

  static bool supports_compare_and_exchange() { return true; }

  static intx allocate_prefetch_distance(bool use_watermark_prefetch) {
    // Hardware prefetching (distance/size in bytes):
    //   Pentium 3 -  64 /  32
    //   Pentium 4 - 256 / 128
    //   Athlon    -  64 /  32 ????
    //   Opteron   - 128 /  64 only when 2 sequential cache lines accessed
    //   Core      - 128 /  64
    //
    // Software prefetching (distance in bytes / instruction with best score):
    //   Pentium 3 - 128 / prefetchnta
    //   Pentium 4 - 512 / prefetchnta
    //   Athlon    - 128 / prefetchnta
    //   Opteron   - 256 / prefetchnta
    //   Core      - 256 / prefetchnta
    // It will be used only when AllocatePrefetchStyle > 0

    if (is_amd_family()) { // AMD | Hygon
      if (supports_sse2()) {
        return 256; // Opteron
      } else {
        return 128; // Athlon
      }
    } else { // Intel
      if (supports_sse3() && cpu_family() == 6) {
        if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
          return 192;
        } else if (use_watermark_prefetch) { // watermark prefetching on Core
#ifdef _LP64
          return 384;
#else
          return 320;
#endif
        }
      }
      if (supports_sse2()) {
        if (cpu_family() == 6) {
          return 256; // Pentium M, Core, Core2
        } else {
          return 512; // Pentium 4
        }
      } else {
        return 128; // Pentium 3 (and all other old CPUs)
      }
    }
  }

  // SSE2 and later processors implement a 'pause' instruction
  // that can be used for efficient implementation of
  // the intrinsic for java.lang.Thread.onSpinWait()
  static bool supports_on_spin_wait() { return supports_sse2(); }

  // x86_64 supports fast class initialization checks for static methods.
  static bool supports_fast_class_init_checks() {
    return LP64_ONLY(true) NOT_LP64(false); // not implemented on x86_32
  }

  constexpr static bool supports_stack_watermark_barrier() {
    return true;
  }

  // There are several instructions to force a cache line sync to memory which
  // we can use to ensure mapped non-volatile memory is up to date with
  // pending in-cache changes.
  //
  // 64-bit cpus always support clflush, which writes back and evicts;
  // on 32-bit cpus support is recorded via a feature flag.
  //
  // clflushopt is optional and acts like clflush except it does
  // not synchronize with other memory ops. It needs a preceding
  // and trailing StoreStore fence.
  //
  // clwb is an optional, Intel-specific instruction which
  // writes back without evicting the line. It also does not
  // synchronize with other memory ops, so it needs preceding
  // and trailing StoreStore fences.
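  //
  // Illustrative ordering sketch only (clflush(line) etc. stand in for the
  // emitted instructions, not helpers defined here); the point is where the
  // StoreStore fences are required for each variant:
  //
  //   clflush:                 store; clflush(line);                   (no extra fences needed)
  //   clflushopt:  StoreStore; clflushopt(line); StoreStore;           (fence before and after)
  //   clwb:        StoreStore; clwb(line);       StoreStore;           (fence before and after)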
#ifdef _LP64
  static bool supports_clflush(); // Can't inline due to header file conflict
#else
  static bool supports_clflush() { return ((_features & CPU_FLUSH) != 0); }
#endif // _LP64
  // Note: CPU_FLUSHOPT and CPU_CLWB bits should always be zero for 32-bit
  static bool supports_clflushopt() { return ((_features & CPU_FLUSHOPT) != 0); }
  static bool supports_clwb() { return ((_features & CPU_CLWB) != 0); }

  // Old CPUs perform lea on the AGU, which causes additional latency when
  // transferring the value from/to the ALU for other operations.
  static bool supports_fast_2op_lea() {
    return (is_intel() && supports_avx()) || // Sandy Bridge and above
           (is_amd()   && supports_avx());   // Jaguar and Bulldozer and above
  }

  // Pre-Icelake Intel CPUs suffer inefficiency with a 3-operand lea, i.e. one that
  // uses all of a base register, an index register and a displacement immediate,
  // which has a latency of 3 cycles.
  // Note that when the address contains no displacement but the base register is
  // rbp or r13, the machine code must contain a zero displacement immediate,
  // effectively transforming a 2-operand lea into a 3-operand one. Such a lea can
  // be replaced by add-add or lea-add (e.g. lea dst, [base + index]; add dst, disp).
  static bool supports_fast_3op_lea() {
    return supports_fast_2op_lea() &&
           ((is_intel() && supports_clwb() && !is_intel_skylake()) || // Icelake and above
            is_amd());
  }

#ifdef __APPLE__
  // Is the CPU running emulated (for example macOS Rosetta running x86_64 code on M1 ARM (aarch64))?
  static bool is_cpu_emulated();
#endif

  // support functions for virtualization detection
 private:
  static void check_virtualizations();

  static const char* cpu_family_description(void);
  static const char* cpu_model_description(void);
  static const char* cpu_brand(void);
  static const char* cpu_brand_string(void);

  static int cpu_type_description(char* const buf, size_t buf_len);
  static int cpu_detailed_description(char* const buf, size_t buf_len);
  static int cpu_extended_brand_string(char* const buf, size_t buf_len);

  static bool cpu_is_em64t(void);
  static bool is_netburst(void);

  // Returns bytes written excluding terminating null byte.
  static size_t cpu_write_support_string(char* const buf, size_t buf_len);
  static void resolve_cpu_information_details(void);
  static int64_t max_qualified_cpu_freq_from_brand_string(void);

 public:
  // Offsets for cpuid asm stub brand string
  static ByteSize proc_name_0_offset() { return byte_offset_of(CpuidInfo, proc_name_0); }
  static ByteSize proc_name_1_offset() { return byte_offset_of(CpuidInfo, proc_name_1); }
  static ByteSize proc_name_2_offset() { return byte_offset_of(CpuidInfo, proc_name_2); }
  static ByteSize proc_name_3_offset() { return byte_offset_of(CpuidInfo, proc_name_3); }
  static ByteSize proc_name_4_offset() { return byte_offset_of(CpuidInfo, proc_name_4); }
  static ByteSize proc_name_5_offset() { return byte_offset_of(CpuidInfo, proc_name_5); }
  static ByteSize proc_name_6_offset() { return byte_offset_of(CpuidInfo, proc_name_6); }
  static ByteSize proc_name_7_offset() { return byte_offset_of(CpuidInfo, proc_name_7); }
  static ByteSize proc_name_8_offset() { return byte_offset_of(CpuidInfo, proc_name_8); }
  static ByteSize proc_name_9_offset() { return byte_offset_of(CpuidInfo, proc_name_9); }
  static ByteSize proc_name_10_offset() { return byte_offset_of(CpuidInfo, proc_name_10); }
  static ByteSize proc_name_11_offset() { return byte_offset_of(CpuidInfo, proc_name_11); }

  static int64_t maximum_qualified_cpu_frequency(void);

  static bool supports_tscinv_ext(void);

  static void initialize_tsc();
  static void initialize_cpu_information(void);
};

#endif // CPU_X86_VM_VERSION_X86_HPP