/*
 * Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "classfile/vmIntrinsics.hpp"
#include "code/codeBlob.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "jvm.h"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/checkedCast.hpp"
#include "utilities/ostream.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/virtualizationSupport.hpp"

int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
bool VM_Version::_has_intel_jcc_erratum;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

#define DECLARE_CPU_FEATURE_NAME(id, name, bit) XSTR(name),
const char* VM_Version::_features_names[] = { CPU_FEATURE_FLAGS(DECLARE_CPU_FEATURE_NAME)};
#undef DECLARE_CPU_FEATURE_NAME
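// CPU_FEATURE_FLAGS is an X-macro: it expands DECLARE_CPU_FEATURE_NAME once per
// (id, name, bit) tuple and XSTR stringifies each name, so this table stays in
// sync with the feature enum by construction.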

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = nullptr;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = nullptr;
// Address of instruction which causes APX specific SEGV
address VM_Version::_cpuinfo_segv_addr_apx = nullptr;
// Address of instruction after the one which causes APX specific SEGV
address VM_Version::_cpuinfo_cont_addr_apx = nullptr;

static BufferBlob* stub_blob;
static const int stub_size = 2550;

int VM_Version::VM_Features::_features_bitmap_size = sizeof(VM_Version::VM_Features::_features_bitmap) / BytesPerLong;

VM_Version::VM_Features VM_Version::_features;
VM_Version::VM_Features VM_Version::_cpu_features;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
  typedef void (*detect_virt_stub_t)(uint32_t, uint32_t*);
  typedef void (*clear_apx_test_state_t)(void);
  typedef void (*getCPUIDBrandString_stub_t)(void*);
}
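// The stubs below are generated into a BufferBlob at startup and invoked through
// these plain C function pointers, hence the extern "C" typedefs.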
static get_cpu_info_stub_t get_cpu_info_stub = nullptr;
static detect_virt_stub_t detect_virt_stub = nullptr;
static clear_apx_test_state_t clear_apx_test_state_stub = nullptr;
static getCPUIDBrandString_stub_t getCPUIDBrandString_stub = nullptr;

bool VM_Version::supports_clflush() {
  // clflush should always be available on x86_64; if not, we are in real
  // trouble because we rely on it to flush the code cache.
  // Unfortunately, Assembler::clflush is currently called as part
  // of generation of the code cache flush routine. This happens
  // under Universe::init before the processor features are set
  // up. Assembler::flush calls this routine to check that clflush
  // is allowed. So, we give the caller a free pass if Universe init
  // is still in progress.
  assert ((!Universe::is_fully_initialized() || _features.supports_feature(CPU_FLUSH)), "clflush should be available");
  return true;
}

#define CPUID_STANDARD_FN   0x0
#define CPUID_STANDARD_FN_1 0x1
#define CPUID_STANDARD_FN_4 0x4
#define CPUID_STANDARD_FN_B 0xb

#define CPUID_EXTENDED_FN   0x80000000
#define CPUID_EXTENDED_FN_1 0x80000001
#define CPUID_EXTENDED_FN_2 0x80000002
#define CPUID_EXTENDED_FN_3 0x80000003
#define CPUID_EXTENDED_FN_4 0x80000004
#define CPUID_EXTENDED_FN_7 0x80000007
#define CPUID_EXTENDED_FN_8 0x80000008

class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address clear_apx_test_state() {
#   define __ _masm->
    address start = __ pc();
    // EGPRs are call-clobbered registers. Explicitly clearing r16 and r31 before
    // the signal-handling test guarantees that, if their values are preserved
    // afterwards, it is because the operating system re-instantiated the extended
    // GPR state, and not merely because the registers were never modified.

    bool save_apx = UseAPX;
    VM_Version::set_apx_cpuFeatures();
    UseAPX = true;
    // EGPR state save/restoration.
    __ mov64(r16, 0L);
    __ mov64(r31, 0L);
    UseAPX = save_apx;
    VM_Version::clean_cpuFeatures();
    __ ret(0);
    return start;
  }

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC = 0x40000;
    const uint32_t HS_EFL_ID = 0x200000;
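    // AC is EFLAGS bit 18 (1 << 18 == 0x40000) and ID is bit 21 (1 << 21 == 0x200000).
    // A CPU on which software can toggle the ID flag supports the CPUID instruction.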
    // Values for when we don't have a CPUID instruction.
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
    bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4, std_cpuid24, std_cpuid29;
    Label sef_cpuid, sefsl1_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7;
    Label ext_cpuid8, done, wrapup, vector_save_restore, apx_save_restore_warning, apx_xstate;
    Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // rcx and rdx are first and second argument registers on windows

    __ push(rbp);
    __ mov(rbp, c_rarg0); // cpuid_info address
    __ push(rbx);
    __ push(rsi);
    __ pushf();          // preserve flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);

    //
    // cpuid(0xB) Processor Topology
    //
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);
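    // With ECX == 0, XGETBV returns XCR0 in EDX:EAX. Bit 1 covers SSE (XMM) state
    // and bit 2 AVX (YMM) state; the opmask/ZMM bits (5-7) and the APX extended
    // GPR bit (19) stored here are tested further below.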

    //
    // cpuid(0x7) Structured Extended Features Enumeration Leaf.
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);
    // ECX = 0
    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi, 12), rdx);

    //
    // cpuid(0x7) Structured Extended Features Enumeration Sub-Leaf 1.
    //
    __ bind(sefsl1_cpuid);
    __ movl(rax, 7);
    __ movl(rcx, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sefsl1_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);

    //
    // cpuid(0x29) APX NCI NDD NF (EAX = 29H, ECX = 0).
    //
    __ bind(std_cpuid29);
    __ movl(rax, 0x29);
    __ movl(rcx, 0);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid29_offset())));
    __ movl(Address(rsi, 0), rbx);

    //
    // cpuid(0x24) Converged Vector ISA Main Leaf (EAX = 24H, ECX = 0).
    //
    __ bind(std_cpuid24);
    __ movl(rax, 0x24);
    __ movl(rcx, 0);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid24_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000);     // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004);     // Is cpuid(0x80000005) supported?
    __ jcc(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006);     // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007);     // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
    __ cmpl(rax, 0x80000008);     // Is cpuid(0x80000009 and above) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid8);
    __ cmpl(rax, 0x8000001E);     // Is cpuid(0x8000001E) supported?
    __ jccb(Assembler::below, ext_cpuid8);
    //
    // Extended cpuid(0x8000001E)
    //
    __ movl(rax, 0x8000001E);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1E_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000008)
    //
    __ bind(ext_cpuid8);
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports APX
    //
    // To enable APX, check CPUID.EAX=7.ECX=1.EDX[21] bit for HW support
    // and XCR0[19] bit for OS support to save/restore extended GPR state.
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sefsl1_cpuid7_offset())));
    __ movl(rax, 0x200000);
    __ andl(rax, Address(rsi, 4));
    __ jcc(Assembler::equal, vector_save_restore);
    // check _cpuid_info.xem_xcr0_eax.bits.apx_f
    __ movl(rax, 0x80000);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits apx_f
    __ jcc(Assembler::equal, vector_save_restore);

    bool save_apx = UseAPX;
    VM_Version::set_apx_cpuFeatures();
    UseAPX = true;
    __ mov64(r16, VM_Version::egpr_test_value());
    __ mov64(r31, VM_Version::egpr_test_value());
    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr_apx(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr_apx(__ pc());
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_save_offset())));
    __ movq(Address(rsi, 0), r16);
    __ movq(Address(rsi, 8), r31);
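    // If the OS saves and restores extended GPR state across the signal, the
    // stores above still observe egpr_test_value; os_supports_apx_egprs() later
    // compares the saved values against it.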

    //
    // Query CPUID 0xD.19 for APX XSAVE offset
    // Extended State Enumeration Sub-leaf 19 (APX)
    // EAX = size of APX state (should be 128)
    // EBX = offset in standard XSAVE format
    //
    __ movl(rax, 0xD);
    __ movl(rcx, 19);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_size_offset())));
    __ movl(Address(rsi, 0), rax);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::apx_xstate_offset_offset())));
    __ movl(Address(rsi, 0), rbx);

    UseAPX = save_apx;
    __ bind(vector_save_restore);
    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, done); // jump if AVX is not supported

    __ movl(rax, 0x6);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::equal, start_simd_check); // jump if OS supports AVX state (xcr0 sse | ymm bits set)

    // we need to bridge farther than imm8, so we use this island as a thunk
    __ bind(done);
    __ jmp(wrapup);

    __ bind(start_simd_check);
    //
    // Some OSs have a bug when upper 128/256bits of YMM/ZMM
    // registers are not restored after a signal processing.
    // Generate SEGV here (reference through null)
    // and check upper YMM/ZMM bits after it.
    //
    int saved_useavx = UseAVX;
    int saved_usesse = UseSSE;

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
      // OR check _cpuid_info.sefsl1_cpuid7_edx.bits.avx10
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
      __ movl(rax, 0x10000);
      __ andl(rax, Address(rsi, 4));
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sefsl1_cpuid7_offset())));
      __ movl(rbx, 0x80000);
      __ andl(rbx, Address(rsi, 4));
      __ orl(rax, rbx);
      __ jccb(Assembler::equal, legacy_setup); // jump if EVEX is not supported
      // check _cpuid_info.xem_xcr0_eax.bits.opmask
      // check _cpuid_info.xem_xcr0_eax.bits.zmm512
      // check _cpuid_info.xem_xcr0_eax.bits.zmm32
      __ movl(rax, 0xE0);
      __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
      __ cmpl(rax, 0xE0);
      __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported

      if (FLAG_IS_DEFAULT(UseAVX)) {
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
        __ movl(rax, Address(rsi, 0));
        __ cmpl(rax, 0x50654);              // If it is Skylake
        __ jcc(Assembler::equal, legacy_setup);
      }
      // EVEX setup: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
      UseAVX = 3;
      UseSSE = 2;
#ifdef _WINDOWS
      // xmm6-xmm15 are callee-saved in the Windows x64 ABI, so preserve the ones we clobber
      // https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm7, Assembler::AVX_512bit);
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm8, Assembler::AVX_512bit);
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm31, Assembler::AVX_512bit);
#endif // _WINDOWS

      // load value into all 64 bytes of zmm7 register
      __ movl(rcx, VM_Version::ymm_test_value());
      __ movdl(xmm0, rcx);
      __ vpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
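      // After the deliberate SEGV below, execution resumes past the faulting load;
      // if the OS failed to restore the upper YMM/ZMM state, the register images
      // saved after the signal will no longer match ymm_test_value.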
      VM_Version::clean_cpuFeatures();
      __ jmp(save_restore_except);
    }

    __ bind(legacy_setup);
    // AVX setup
    VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
    UseAVX = 1;
    UseSSE = 2;
#ifdef _WINDOWS
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm7);
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm8);
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm15);
#endif // _WINDOWS

    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128_high(xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
    __ vmovdqu(xmm8, xmm0);
    __ vmovdqu(xmm15, xmm0);
    VM_Version::clean_cpuFeatures();

    __ bind(save_restore_except);
    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr(__ pc());
    // Returns here after signal. Save xmm0 to check it later.

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
      __ movl(rax, 0x10000);
      __ andl(rax, Address(rsi, 4));
      __ jcc(Assembler::equal, legacy_save_restore);
      // check _cpuid_info.xem_xcr0_eax.bits.opmask
      // check _cpuid_info.xem_xcr0_eax.bits.zmm512
      // check _cpuid_info.xem_xcr0_eax.bits.zmm32
      __ movl(rax, 0xE0);
      __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
      __ cmpl(rax, 0xE0);
      __ jcc(Assembler::notEqual, legacy_save_restore);

      if (FLAG_IS_DEFAULT(UseAVX)) {
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
        __ movl(rax, Address(rsi, 0));
        __ cmpl(rax, 0x50654);              // If it is Skylake
        __ jcc(Assembler::equal, legacy_save_restore);
      }
      // EVEX check: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
      UseAVX = 3;
      UseSSE = 2;
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
      __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);

#ifdef _WINDOWS
      __ evmovdqul(xmm31, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
      __ evmovdqul(xmm8, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
      __ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _WINDOWS
      generate_vzeroupper(wrapup);
      VM_Version::clean_cpuFeatures();
      UseAVX = saved_useavx;
      UseSSE = saved_usesse;
      __ jmp(wrapup);
    }

    __ bind(legacy_save_restore);
    // AVX check
    VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi, 0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);

#ifdef _WINDOWS
    __ vmovdqu(xmm15, Address(rsp, 0));
    __ addptr(rsp, 32);
    __ vmovdqu(xmm8, Address(rsp, 0));
    __ addptr(rsp, 32);
    __ vmovdqu(xmm7, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _WINDOWS

    generate_vzeroupper(wrapup);
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    __ bind(wrapup);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };
  void generate_vzeroupper(Label& L_wrapup) {
#   define __ _masm->
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ cmpl(Address(rsi, 4), 0x756e6547);  // 'uneG'
    __ jcc(Assembler::notEqual, L_wrapup);
    __ movl(rcx, 0x0FFF0FF0);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ andl(rcx, Address(rsi, 0));
    __ cmpl(rcx, 0x00050670);              // If it is Xeon Phi 3200/5200/7200
    __ jcc(Assembler::equal, L_wrapup);
    __ cmpl(rcx, 0x00080650);              // If it is Future Xeon Phi
    __ jcc(Assembler::equal, L_wrapup);
    // vzeroupper() will use a pre-computed instruction sequence that we
    // can't compute until after we've determined CPU capabilities. Use
    // uncached variant here directly to be able to bootstrap correctly
    __ vzeroupper_uncached();
#   undef __
  }
  address generate_detect_virt() {
    StubCodeMark mark(this, "VM_Version", "detect_virt_stub");
#   define __ _masm->

    address start = __ pc();

    // Evacuate callee-saved registers
    __ push(rbp);
    __ push(rbx);
    __ push(rsi); // for Windows

    __ mov(rax, c_rarg0); // CPUID leaf
    __ mov(rsi, c_rarg1); // register array address (eax, ebx, ecx, edx)

    __ cpuid();

    // Store result to register array
    __ movl(Address(rsi,  0), rax);
    __ movl(Address(rsi,  4), rbx);
    __ movl(Address(rsi,  8), rcx);
    __ movl(Address(rsi, 12), rdx);
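    // Callers typically pass hypervisor leaves (0x40000000 and up) to identify
    // the VMM vendor; see the uses of detect_virt_stub.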

    // Epilogue
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };


  address generate_getCPUIDBrandString(void) {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC           = 0x40000;
    const uint32_t HS_EFL_ID           = 0x200000;
    // Values for when we don't have a CPUID instruction.
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386   = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486   = (4 << CPU_FAMILY_SHIFT);

    Label detect_486, cpu486, detect_586, done, ext_cpuid;

    StubCodeMark mark(this, "VM_Version", "getCPUIDNameInfo_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void getCPUIDBrandString(VM_Version::CpuidInfo* cpuid_info);
    //
    // rcx and rdx are first and second argument registers on windows

    __ push(rbp);
    __ mov(rbp, c_rarg0); // cpuid_info address
    __ push(rbx);
    __ push(rsi);
    __ pushf();          // preserve flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486

    //
    // Extended cpuid(0x80000000) for processor brand string detection
    //
    __ bind(ext_cpuid);
    __ movl(rax, CPUID_EXTENDED_FN);
    __ cpuid();
    __ cmpl(rax, CPUID_EXTENDED_FN_4);
    __ jcc(Assembler::below, done);

    //
    // Extended cpuid(0x80000002)  // first 16 bytes in brand string
    //
    __ movl(rax, CPUID_EXTENDED_FN_2);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_1_offset())));
    __ movl(Address(rsi, 0), rbx);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_2_offset())));
    __ movl(Address(rsi, 0), rcx);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_3_offset())));
    __ movl(Address(rsi,0), rdx);

    //
    // Extended cpuid(0x80000003) // next 16 bytes in brand string
    //
    __ movl(rax, CPUID_EXTENDED_FN_3);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_5_offset())));
    __ movl(Address(rsi, 0), rbx);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_6_offset())));
    __ movl(Address(rsi, 0), rcx);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_7_offset())));
    __ movl(Address(rsi,0), rdx);

    //
    // Extended cpuid(0x80000004) // last 16 bytes in brand string
    //
    __ movl(rax, CPUID_EXTENDED_FN_4);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_9_offset())));
    __ movl(Address(rsi, 0), rbx);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_10_offset())));
    __ movl(Address(rsi, 0), rcx);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::proc_name_11_offset())));
    __ movl(Address(rsi,0), rdx);

    //
    // return
    //
    __ bind(done);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };
};

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  // Get raw processor info

  get_cpu_info_stub(&_cpuid_info);

  assert_is_initialized();
  _cpu = extended_cpu_family();
  _model = extended_cpu_model();
  _stepping = cpu_stepping();

  if (cpu_family() > 4) { // it supports CPUID
    _features = _cpuid_info.feature_flags(); // These can be changed by VM settings
    _cpu_features = _features; // Preserve features
    // Logical processors are only available on P4s and above,
    // and only if hyperthreading is available.
    _logical_processors_per_package = logical_processor_count();
    _L1_data_cache_line_size = L1_line_size();
  }

  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  _supports_atomic_getset8 = true;
  _supports_atomic_getadd8 = true;

  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // On 64-bit, the use of SSE2 is the required minimum
  if (UseSSE < 2) UseSSE = 2;

  // flush_icache_stub has to be generated first.
  // That is why Icache line size is hard coded in ICache class,
  // see icache_x86.hpp. It is also the reason why we can't use
  // the clflush instruction in a 32-bit VM, since it could be running
  // on a CPU which does not support it.
  //
  // The only thing we can do is to verify that the flushed
  // ICache::line_size has the correct value.
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
  // clflush_size is size in quadwords (8 bytes).
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
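  // Example: clflush_size == 8 quadwords corresponds to 8 * 8 = 64-byte cache
  // lines, the value published to _data_cache_line_flush_size below.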

  // assigning this field effectively enables Unsafe.writebackMemory()
  // by initializing UnsafeConstants.DATA_CACHE_LINE_FLUSH_SIZE to non-zero
  // that is only implemented on x86_64 and only if the OS plays ball
  if (os::supports_map_sync()) {
    // publish data cache line flush size to generic field, otherwise
    // let it default to zero thereby disabling writeback
    _data_cache_line_flush_size = _cpuid_info.std_cpuid1_ebx.bits.clflush_size * 8;
  }

  // Check if processor has Intel Ecore
  if (FLAG_IS_DEFAULT(EnableX86ECoreOpts) && is_intel() && is_intel_server_family() &&
    (supports_hybrid() ||
     _model == 0xAF /* Xeon 6 E-cores (Sierra Forest) */ ||
     _model == 0xDD /* Xeon 6+ E-cores (Clearwater Forest) */ )) {
    FLAG_SET_DEFAULT(EnableX86ECoreOpts, true);
  }

  if (UseSSE < 4) {
    _features.clear_feature(CPU_SSE4_1);
    _features.clear_feature(CPU_SSE4_2);
  }

  if (UseSSE < 3) {
    _features.clear_feature(CPU_SSE3);
    _features.clear_feature(CPU_SSSE3);
    _features.clear_feature(CPU_SSE4A);
  }

  if (UseSSE < 2)
    _features.clear_feature(CPU_SSE2);

  if (UseSSE < 1)
    _features.clear_feature(CPU_SSE);
  // Since AVX instructions are slower than SSE on some ZX cpus, force UseAVX=0.
  if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7))) {
    UseAVX = 0;
  }

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires.  I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  int use_sse_limit = 0;
  if (UseSSE > 0) {
    if (UseSSE > 3 && supports_sse4_1()) {
      use_sse_limit = 4;
    } else if (UseSSE > 2 && supports_sse3()) {
      use_sse_limit = 3;
    } else if (UseSSE > 1 && supports_sse2()) {
      use_sse_limit = 2;
    } else if (UseSSE > 0 && supports_sse()) {
      use_sse_limit = 1;
    } else {
      use_sse_limit = 0;
    }
  }
  if (FLAG_IS_DEFAULT(UseSSE)) {
    FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
  } else if (UseSSE > use_sse_limit) {
    warning("UseSSE=%d is not supported on this CPU, setting it to UseSSE=%d", UseSSE, use_sse_limit);
    FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
  }
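  // Example: -XX:UseSSE=4 on a CPU that reports only SSE3 clamps UseSSE to 3
  // and emits the warning above.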

  // first try initial setting and detect what we can support
  int use_avx_limit = 0;
  if (UseAVX > 0) {
    if (UseSSE < 4) {
      // Don't use AVX if SSE is unavailable or has been disabled.
      use_avx_limit = 0;
    } else if (UseAVX > 2 && supports_evex()) {
      use_avx_limit = 3;
    } else if (UseAVX > 1 && supports_avx2()) {
      use_avx_limit = 2;
    } else if (UseAVX > 0 && supports_avx()) {
      use_avx_limit = 1;
    } else {
      use_avx_limit = 0;
    }
  }
  if (FLAG_IS_DEFAULT(UseAVX)) {
    // Don't use AVX-512 on older Skylakes unless explicitly requested.
    if (use_avx_limit > 2 && is_intel_skylake() && _stepping < 5) {
      FLAG_SET_DEFAULT(UseAVX, 2);
    } else {
      FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
    }
  }

  if (UseAVX > use_avx_limit) {
    if (UseSSE < 4) {
      warning("UseAVX=%d requires UseSSE=4, setting it to UseAVX=0", UseAVX);
    } else {
      warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", UseAVX, use_avx_limit);
    }
    FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
  }

  if (UseAVX < 3) {
    _features.clear_feature(CPU_AVX512F);
    _features.clear_feature(CPU_AVX512DQ);
    _features.clear_feature(CPU_AVX512CD);
    _features.clear_feature(CPU_AVX512BW);
    _features.clear_feature(CPU_AVX512ER);
    _features.clear_feature(CPU_AVX512PF);
    _features.clear_feature(CPU_AVX512VL);
    _features.clear_feature(CPU_AVX512_VPOPCNTDQ);
    _features.clear_feature(CPU_AVX512_VPCLMULQDQ);
    _features.clear_feature(CPU_AVX512_VAES);
    _features.clear_feature(CPU_AVX512_VNNI);
    _features.clear_feature(CPU_AVX512_VBMI);
    _features.clear_feature(CPU_AVX512_VBMI2);
    _features.clear_feature(CPU_AVX512_BITALG);
    _features.clear_feature(CPU_AVX512_IFMA);
    _features.clear_feature(CPU_APX_F);
    _features.clear_feature(CPU_AVX512_FP16);
    _features.clear_feature(CPU_AVX10_1);
    _features.clear_feature(CPU_AVX10_2);
  }

  if (UseAVX < 2) {
    _features.clear_feature(CPU_AVX2);
    _features.clear_feature(CPU_AVX_IFMA);
  }

  if (UseAVX < 1) {
    _features.clear_feature(CPU_AVX);
    _features.clear_feature(CPU_VZEROUPPER);
    _features.clear_feature(CPU_F16C);
    _features.clear_feature(CPU_SHA512);
  }

  if (logical_processors_per_package() == 1) {
    // HT processor could be installed on a system which doesn't support HT.
    _features.clear_feature(CPU_HT);
  }

  if (is_intel()) { // Intel cpus specific settings
    if (is_knights_family()) {
      _features.clear_feature(CPU_VZEROUPPER);
      _features.clear_feature(CPU_AVX512BW);
      _features.clear_feature(CPU_AVX512VL);
      _features.clear_feature(CPU_APX_F);
      _features.clear_feature(CPU_AVX512DQ);
      _features.clear_feature(CPU_AVX512_VNNI);
      _features.clear_feature(CPU_AVX512_VAES);
      _features.clear_feature(CPU_AVX512_VPOPCNTDQ);
      _features.clear_feature(CPU_AVX512_VPCLMULQDQ);
      _features.clear_feature(CPU_AVX512_VBMI);
      _features.clear_feature(CPU_AVX512_VBMI2);
      _features.clear_feature(CPU_CLWB);
      _features.clear_feature(CPU_FLUSHOPT);
      _features.clear_feature(CPU_GFNI);
      _features.clear_feature(CPU_AVX512_BITALG);
      _features.clear_feature(CPU_AVX512_IFMA);
      _features.clear_feature(CPU_AVX_IFMA);
      _features.clear_feature(CPU_AVX512_FP16);
      _features.clear_feature(CPU_AVX10_1);
      _features.clear_feature(CPU_AVX10_2);
    }
  }

  // Currently APX support is only enabled for targets supporting AVX512VL feature.
  bool apx_supported = os_supports_apx_egprs() && supports_apx_f() && supports_avx512vl();
  if (UseAPX && !apx_supported) {
    warning("UseAPX is not supported on this CPU, setting it to false");
    FLAG_SET_DEFAULT(UseAPX, false);
  }

  if (!UseAPX) {
    _features.clear_feature(CPU_APX_F);
  }

  if (FLAG_IS_DEFAULT(IntelJccErratumMitigation)) {
    _has_intel_jcc_erratum = compute_has_intel_jcc_erratum();
    FLAG_SET_ERGO(IntelJccErratumMitigation, _has_intel_jcc_erratum);
  } else {
    _has_intel_jcc_erratum = IntelJccErratumMitigation;
  }

  assert(supports_clflush(), "Always present");
  if (X86ICacheSync == -1) {
    // Auto-detect, choosing the best performant one that still flushes
    // the cache. We could switch to CPUID/SERIALIZE ("4"/"5") going forward.
    if (supports_clwb()) {
      FLAG_SET_ERGO(X86ICacheSync, 3);
    } else if (supports_clflushopt()) {
      FLAG_SET_ERGO(X86ICacheSync, 2);
    } else {
      FLAG_SET_ERGO(X86ICacheSync, 1);
    }
  } else {
    if ((X86ICacheSync == 2) && !supports_clflushopt()) {
      vm_exit_during_initialization("CPU does not support CLFLUSHOPT, unable to use X86ICacheSync=2");
    }
    if ((X86ICacheSync == 3) && !supports_clwb()) {
      vm_exit_during_initialization("CPU does not support CLWB, unable to use X86ICacheSync=3");
    }
    if ((X86ICacheSync == 5) && !supports_serialize()) {
      vm_exit_during_initialization("CPU does not support SERIALIZE, unable to use X86ICacheSync=5");
    }
  }
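  // Per the checks above, the X86ICacheSync values map to: 1 = clflush,
  // 2 = clflushopt, 3 = clwb, 4 = cpuid, 5 = serialize; see the flag's
  // definition for the authoritative list.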

  stringStream ss(2048);
  if (supports_hybrid()) {
    ss.print("(hybrid)");
  } else {
    ss.print("(%u cores per cpu, %u threads per core)", cores_per_cpu(), threads_per_core());
  }
  ss.print(" family %d model %d stepping %d microcode 0x%x",
           cpu_family(), _model, _stepping, os::cpu_microcode_revision());
  ss.print(", ");
  int features_offset = (int)ss.size();
  insert_features_names(_features, ss);

  _cpu_info_string = ss.as_string(true);
  _features_string = _cpu_info_string + features_offset;

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      if (UseSSE > 2) {
        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
        }
      } else {
        // The AES intrinsic stubs require AES instruction support (of course)
        // but also require SSE3 mode or higher for the instructions they use.
        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
      }

      // --AES-CTR begins--
      if (!UseAESIntrinsics) {
        if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
          warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      } else {
        if (supports_sse4_1()) {
          if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
          }
        } else {
          // The AES-CTR intrinsic stubs require AES instruction support (of course)
          // but also require SSE4.1 mode or higher for the instructions they use.
          if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled.");
          }
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      }
      // --AES-CTR ends--
    }
  } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
    if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
      warning("AES-CTR intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
    }
  }

  // Use CLMUL instructions if available.
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
      warning("CRC32 Intrinsics requires CLMUL instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (supports_avx2()) {
    if (FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
      UseAdler32Intrinsics = true;
    }
  } else if (UseAdler32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseAdler32Intrinsics)) {
      warning("Adler32 Intrinsics requires avx2 instructions (not available on this CPU)");
    }
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (supports_sse4_2() && supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      UseCRC32CIntrinsics = true;
    }
  } else if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      warning("CRC32C intrinsics are not available on this CPU");
    }
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }

  // GHASH/GCM intrinsics
  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      UseGHASHIntrinsics = true;
    }
  } else if (UseGHASHIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
      warning("GHASH intrinsic requires CLMUL and SSE2 instructions on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  // ChaCha20 Intrinsics
  // As long as the system supports AVX as a baseline we can do a
  // SIMD-enabled block function.  StubGenerator makes the determination
  // based on the VM capabilities whether to use an AVX2 or AVX512-enabled
  // version.
  if (UseAVX >= 1) {
    if (FLAG_IS_DEFAULT(UseChaCha20Intrinsics)) {
      UseChaCha20Intrinsics = true;
    }
  } else if (UseChaCha20Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseChaCha20Intrinsics)) {
      warning("ChaCha20 intrinsic requires AVX instructions");
    }
    FLAG_SET_DEFAULT(UseChaCha20Intrinsics, false);
  }

  // Kyber Intrinsics
  // Currently we only have them for AVX512
  if (supports_evex() && supports_avx512bw()) {
    if (FLAG_IS_DEFAULT(UseKyberIntrinsics)) {
      UseKyberIntrinsics = true;
    }
  } else if (UseKyberIntrinsics) {
    warning("Intrinsics for ML-KEM are not available on this CPU.");
    FLAG_SET_DEFAULT(UseKyberIntrinsics, false);
  }

  // Dilithium Intrinsics
  if (UseAVX > 1) {
    if (FLAG_IS_DEFAULT(UseDilithiumIntrinsics)) {
      UseDilithiumIntrinsics = true;
    }
  } else if (UseDilithiumIntrinsics) {
    warning("Intrinsics for ML-DSA are not available on this CPU.");
    FLAG_SET_DEFAULT(UseDilithiumIntrinsics, false);
  }

  // Base64 Intrinsics (Check the condition for which the intrinsic will be active)
  if (UseAVX >= 2) {
    if (FLAG_IS_DEFAULT(UseBASE64Intrinsics)) {
      UseBASE64Intrinsics = true;
    }
  } else if (UseBASE64Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseBASE64Intrinsics))
      warning("Base64 intrinsic requires AVX2 instructions on this CPU");
    FLAG_SET_DEFAULT(UseBASE64Intrinsics, false);
  }

  if (supports_fma()) {
    if (FLAG_IS_DEFAULT(UseFMA)) {
      UseFMA = true;
    }
  } else if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (FLAG_IS_DEFAULT(UseMD5Intrinsics)) {
    UseMD5Intrinsics = true;
  }

  if (supports_sha() || (supports_avx2() && supports_bmi2())) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      UseSHA = true;
    }
  } else if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (supports_sha() && supports_sse4_1() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
    }
  } else if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (supports_sse4_1() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

  if (UseSHA && supports_avx2() && (supports_bmi2() || supports_sha512())) {
    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    }
  } else if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (supports_evex() && supports_avx512bw()) {
    if (FLAG_IS_DEFAULT(UseSHA3Intrinsics)) {
      UseSHA3Intrinsics = true;
    }
  } else if (UseSHA3Intrinsics) {
    warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA3Intrinsics, false);
  }

  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

#if COMPILER2_OR_JVMCI
  int max_vector_size = 0;
  if (UseAVX == 0 || !os_supports_avx_vectors()) {
    // 16-byte vectors (in XMM) are supported with SSE2+
    max_vector_size = 16;
  } else if (UseAVX == 1 || UseAVX == 2) {
    // 32-byte vectors (in YMM) are only supported with AVX+
    max_vector_size = 32;
  } else if (UseAVX > 2) {
    // 64-byte vectors (in ZMM) are only supported with AVX3 (UseAVX > 2)
    max_vector_size = 64;
  }

  int min_vector_size = 4; // We require MaxVectorSize to be at least 4 on 64-bit

  if (!FLAG_IS_DEFAULT(MaxVectorSize)) {
    if (MaxVectorSize < min_vector_size) {
      warning("MaxVectorSize must be at least %i on this platform", min_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, min_vector_size);
    }
    if (MaxVectorSize > max_vector_size) {
      warning("MaxVectorSize must be at most %i on this platform", max_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
    }
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2, setting to default: %i", max_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
    }
  } else {
    // If default, use highest supported configuration
    FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
  }

#if defined(COMPILER2) && defined(ASSERT)
  if (MaxVectorSize > 0) {
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handling:");
      int nreg = 4;
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >= 0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
  }
#endif // COMPILER2 && ASSERT

  if ((supports_avx512ifma() && supports_avx512vlbw()) || supports_avxifma()) {
    if (FLAG_IS_DEFAULT(UsePoly1305Intrinsics)) {
      FLAG_SET_DEFAULT(UsePoly1305Intrinsics, true);
    }
  } else if (UsePoly1305Intrinsics) {
    warning("Intrinsics for Poly1305 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UsePoly1305Intrinsics, false);
  }

  if ((supports_avx512ifma() && supports_avx512vlbw()) || supports_avxifma()) {
    if (FLAG_IS_DEFAULT(UseIntPolyIntrinsics)) {
      FLAG_SET_DEFAULT(UseIntPolyIntrinsics, true);
    }
  } else if (UseIntPolyIntrinsics) {
    warning("Intrinsics for Polynomial crypto functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseIntPolyIntrinsics, false);
  }

  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }
#endif // COMPILER2_OR_JVMCI

  // On newer CPUs, instructions which update the whole XMM register should be
  // used to prevent partial register stalls caused by dependencies on the
  // upper half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).
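  //
  // Rationale sketch: movlpd merges into the low 64 bits and leaves the upper
  // half of the XMM register untouched, so it carries a false dependency on
  // the register's previous contents; movsd(xmm, mem) writes the full
  // register and breaks that dependency chain.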
1463 
1464 
  if (is_zx()) { // ZX cpu-specific settings
1466     if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
1467       UseStoreImmI16 = false; // don't use it on ZX cpus
1468     }
1469     if ((cpu_family() == 6) || (cpu_family() == 7)) {
1470       if (FLAG_IS_DEFAULT(UseAddressNop)) {
1471         // Use it on all ZX cpus
1472         UseAddressNop = true;
1473       }
1474     }
1475     if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
1476       UseXmmLoadAndClearUpper = true; // use movsd on all ZX cpus
1477     }
1478     if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
1479       if (supports_sse3()) {
1480         UseXmmRegToRegMoveAll = true; // use movaps, movapd on new ZX cpus
1481       } else {
1482         UseXmmRegToRegMoveAll = false;
1483       }
1484     }
1485     if (((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse3()) { // new ZX cpus
1486 #ifdef COMPILER2
1487       if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new ZX cpus, apply the following optimization: do not align the
        // beginning of a loop if there are enough instructions left
        // (NumberOfLoopInstrToAlign, defined in c2_globals.hpp) in the current
        // fetch line (OptoLoopAlignment), or if the padding would be large
        // (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new ZX cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
1496         MaxLoopPad = 11;
1497       }
1498 #endif // COMPILER2
1499       if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1500         UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus
1501       }
1502       if (supports_sse4_2()) { // new ZX cpus
1503         if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1504           UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus
1505         }
1506       }
1507     }
1508 
1509     if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
1510       FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1511     }
1512   }
1513 
  if (is_amd_family()) { // AMD cpu-specific settings
1515     if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
1516       // Use it on new AMD cpus starting from Opteron.
1517       UseAddressNop = true;
1518     }
1519     if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
1520       // Use it on new AMD cpus starting from Opteron.
1521       UseNewLongLShift = true;
1522     }
1523     if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
1524       if (supports_sse4a()) {
1525         UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
1526       } else {
1527         UseXmmLoadAndClearUpper = false;
1528       }
1529     }
1530     if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
1531       if (supports_sse4a()) {
1532         UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
1533       } else {
1534         UseXmmRegToRegMoveAll = false;
1535       }
1536     }
1537     if (FLAG_IS_DEFAULT(UseXmmI2F)) {
1538       if (supports_sse4a()) {
1539         UseXmmI2F = true;
1540       } else {
1541         UseXmmI2F = false;
1542       }
1543     }
1544     if (FLAG_IS_DEFAULT(UseXmmI2D)) {
1545       if (supports_sse4a()) {
1546         UseXmmI2D = true;
1547       } else {
1548         UseXmmI2D = false;
1549       }
1550     }
1551 
1552     // some defaults for AMD family 15h
1553     if (cpu_family() == 0x15) {
1554       // On family 15h processors default is no sw prefetch
1555       if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
1556         FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
1557       }
      // Also, if some other prefetch style is specified, the default instruction type is PREFETCHW
1559       if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
1560         FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1561       }
1562       // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
1563       if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1564         FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
1565       }
1566       if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1567         FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
1568       }
1569     }
1570 
1571 #ifdef COMPILER2
1572     if (cpu_family() < 0x17 && MaxVectorSize > 16) {
1573       // Limit vectors size to 16 bytes on AMD cpus < 17h.
1574       FLAG_SET_DEFAULT(MaxVectorSize, 16);
1575     }
1576 #endif // COMPILER2
1577 
1578     // Some defaults for AMD family >= 17h && Hygon family 18h
1579     if (cpu_family() >= 0x17) {
1580       // On family >=17h processors use XMM and UnalignedLoadStores
1581       // for Array Copy
1582       if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1583         FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
1584       }
1585       if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1586         FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
1587       }
1588 #ifdef COMPILER2
1589       if (supports_sse4_2() && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
1590         FLAG_SET_DEFAULT(UseFPUForSpilling, true);
1591       }
1592 #endif
1593     }
1594   }
1595 
  if (is_intel()) { // Intel cpu-specific settings
1597     if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
1598       UseStoreImmI16 = false; // don't use it on Intel cpus
1599     }
1600     if (is_intel_server_family() || cpu_family() == 15) {
1601       if (FLAG_IS_DEFAULT(UseAddressNop)) {
1602         // Use it on all Intel cpus starting from PentiumPro
1603         UseAddressNop = true;
1604       }
1605     }
1606     if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
1607       UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
1608     }
1609     if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
1610       if (supports_sse3()) {
1611         UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
1612       } else {
1613         UseXmmRegToRegMoveAll = false;
1614       }
1615     }
1616     if (is_intel_server_family() && supports_sse3()) { // New Intel cpus
1617 #ifdef COMPILER2
1618       if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new Intel cpus, apply the following optimization: do not align
        // the beginning of a loop if there are enough instructions left
        // (NumberOfLoopInstrToAlign, defined in c2_globals.hpp) in the current
        // fetch line (OptoLoopAlignment), or if the padding would be large
        // (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new Intel cpus to reduce the number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
1627         MaxLoopPad = 11;
1628       }
1629 #endif // COMPILER2
1630 
1631       if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1632         UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
1633       }
1634       if ((supports_sse4_2() && supports_ht()) || supports_avx()) { // Newest Intel cpus
1635         if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1636           UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
1637         }
1638       }
1639     }
1640     if (is_atom_family() || is_knights_family()) {
1641 #ifdef COMPILER2
1642       if (FLAG_IS_DEFAULT(OptoScheduling)) {
1643         OptoScheduling = true;
1644       }
1645 #endif
1646       if (supports_sse4_2()) { // Silvermont
1647         if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1648           UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
1649         }
1650       }
1651       if (FLAG_IS_DEFAULT(UseIncDec)) {
1652         FLAG_SET_DEFAULT(UseIncDec, false);
1653       }
1654     }
1655     if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
1656       FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1657     }
1658 #ifdef COMPILER2
1659     if (UseAVX > 2) {
1660       if (FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) ||
1661           (!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize) &&
1662            ArrayOperationPartialInlineSize != 0 &&
1663            ArrayOperationPartialInlineSize != 16 &&
1664            ArrayOperationPartialInlineSize != 32 &&
1665            ArrayOperationPartialInlineSize != 64)) {
1666         int inline_size = 0;
1667         if (MaxVectorSize >= 64 && AVX3Threshold == 0) {
1668           inline_size = 64;
1669         } else if (MaxVectorSize >= 32) {
1670           inline_size = 32;
1671         } else if (MaxVectorSize >= 16) {
1672           inline_size = 16;
1673         }
        if (!FLAG_IS_DEFAULT(ArrayOperationPartialInlineSize)) {
          warning("Setting ArrayOperationPartialInlineSize to %d", inline_size);
        }
1677         ArrayOperationPartialInlineSize = inline_size;
1678       }
1679 
1680       if (ArrayOperationPartialInlineSize > MaxVectorSize) {
1681         ArrayOperationPartialInlineSize = MaxVectorSize >= 16 ? MaxVectorSize : 0;
1682         if (ArrayOperationPartialInlineSize) {
1683           warning("Setting ArrayOperationPartialInlineSize as MaxVectorSize=%zd", MaxVectorSize);
1684         } else {
1685           warning("Setting ArrayOperationPartialInlineSize as %zd", ArrayOperationPartialInlineSize);
1686         }
1687       }
1688     }
1689 #endif
1690   }
1691 
1692 #ifdef COMPILER2
1693   if (FLAG_IS_DEFAULT(OptimizeFill)) {
1694     if (MaxVectorSize < 32 || (!EnableX86ECoreOpts && !VM_Version::supports_avx512vlbw())) {
1695       OptimizeFill = false;
1696     }
1697   }
1698 #endif
1699   if (supports_sse4_2()) {
1700     if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
1701       FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
1702     }
1703   } else {
1704     if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
1705       warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
1706     }
1707     FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
1708   }
1709   if (UseSSE42Intrinsics) {
1710     if (FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
1711       UseVectorizedMismatchIntrinsic = true;
1712     }
1713   } else if (UseVectorizedMismatchIntrinsic) {
1714     if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic))
1715       warning("vectorizedMismatch intrinsics are not available on this CPU");
1716     FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
1717   }
1718   if (UseAVX >= 2) {
1719     FLAG_SET_DEFAULT(UseVectorizedHashCodeIntrinsic, true);
1720   } else if (UseVectorizedHashCodeIntrinsic) {
1721     if (!FLAG_IS_DEFAULT(UseVectorizedHashCodeIntrinsic))
1722       warning("vectorizedHashCode intrinsics are not available on this CPU");
1723     FLAG_SET_DEFAULT(UseVectorizedHashCodeIntrinsic, false);
1724   }
1725 
  // Use the count-leading-zeros instruction if available.
1727   if (supports_lzcnt()) {
1728     if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
1729       UseCountLeadingZerosInstruction = true;
1730     }
  } else if (UseCountLeadingZerosInstruction) {
1732     warning("lzcnt instruction is not available on this CPU");
1733     FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
1734   }
1735 
  // Use the count-trailing-zeros instruction if available.
1737   if (supports_bmi1()) {
1738     // tzcnt does not require VEX prefix
1739     if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
1740       if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
1741         // Don't use tzcnt if BMI1 is switched off on command line.
1742         UseCountTrailingZerosInstruction = false;
1743       } else {
1744         UseCountTrailingZerosInstruction = true;
1745       }
1746     }
1747   } else if (UseCountTrailingZerosInstruction) {
1748     warning("tzcnt instruction is not available on this CPU");
1749     FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
1750   }
1751 
1752   // BMI instructions (except tzcnt) use an encoding with VEX prefix.
1753   // VEX prefix is generated only when AVX > 0.
1754   if (supports_bmi1() && supports_avx()) {
1755     if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
1756       UseBMI1Instructions = true;
1757     }
1758   } else if (UseBMI1Instructions) {
1759     warning("BMI1 instructions are not available on this CPU (AVX is also required)");
1760     FLAG_SET_DEFAULT(UseBMI1Instructions, false);
1761   }
1762 
1763   if (supports_bmi2() && supports_avx()) {
1764     if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
1765       UseBMI2Instructions = true;
1766     }
1767   } else if (UseBMI2Instructions) {
1768     warning("BMI2 instructions are not available on this CPU (AVX is also required)");
1769     FLAG_SET_DEFAULT(UseBMI2Instructions, false);
1770   }
1771 
1772   // Use population count instruction if available.
1773   if (supports_popcnt()) {
1774     if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
1775       UsePopCountInstruction = true;
1776     }
1777   } else if (UsePopCountInstruction) {
1778     warning("POPCNT instruction is not available on this CPU");
1779     FLAG_SET_DEFAULT(UsePopCountInstruction, false);
1780   }
1781 
1782   // Use fast-string operations if available.
1783   if (supports_erms()) {
1784     if (FLAG_IS_DEFAULT(UseFastStosb)) {
1785       UseFastStosb = true;
1786     }
1787   } else if (UseFastStosb) {
1788     warning("fast-string operations are not available on this CPU");
1789     FLAG_SET_DEFAULT(UseFastStosb, false);
1790   }
1791 
  // For AMD processors, use XMM/YMM MOVDQU instructions
  // for object initialization by default.
1794   if (is_amd() && cpu_family() >= 0x19) {
1795     if (FLAG_IS_DEFAULT(UseFastStosb)) {
1796       UseFastStosb = false;
1797     }
1798   }
1799 
1800 #ifdef COMPILER2
1801   if (is_intel() && MaxVectorSize > 16) {
1802     if (FLAG_IS_DEFAULT(UseFastStosb)) {
1803       UseFastStosb = false;
1804     }
1805   }
1806 #endif
1807 
  // Use XMM/YMM MOVDQU instructions for object initialization
1809   if (!UseFastStosb && UseUnalignedLoadStores) {
1810     if (FLAG_IS_DEFAULT(UseXMMForObjInit)) {
1811       UseXMMForObjInit = true;
1812     }
1813   } else if (UseXMMForObjInit) {
1814     warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off.");
1815     FLAG_SET_DEFAULT(UseXMMForObjInit, false);
1816   }
1817 
1818 #ifdef COMPILER2
1819   if (FLAG_IS_DEFAULT(AlignVector)) {
1820     // Modern processors allow misaligned memory operations for vectors.
1821     AlignVector = !UseUnalignedLoadStores;
1822   }
1823 #endif // COMPILER2
1824 
1825   if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
1826     if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) {
1827       FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
1828     } else if (!supports_sse() && supports_3dnow_prefetch()) {
1829       FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1830     }
1831   }
1832 
1833   // Allocation prefetch settings
1834   int cache_line_size = checked_cast<int>(prefetch_data_size());
1835   if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize) &&
1836       (cache_line_size > AllocatePrefetchStepSize)) {
1837     FLAG_SET_DEFAULT(AllocatePrefetchStepSize, cache_line_size);
1838   }
1839 
1840   if ((AllocatePrefetchDistance == 0) && (AllocatePrefetchStyle != 0)) {
1841     assert(!FLAG_IS_DEFAULT(AllocatePrefetchDistance), "default value should not be 0");
1842     if (!FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
1843       warning("AllocatePrefetchDistance is set to 0 which disable prefetching. Ignoring AllocatePrefetchStyle flag.");
1844     }
1845     FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
1846   }
1847 
1848   if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
1849     bool use_watermark_prefetch = (AllocatePrefetchStyle == 2);
1850     FLAG_SET_DEFAULT(AllocatePrefetchDistance, allocate_prefetch_distance(use_watermark_prefetch));
1851   }
1852 
1853   if (is_intel() && is_intel_server_family() && supports_sse3()) {
1854     if (FLAG_IS_DEFAULT(AllocatePrefetchLines) &&
1855         supports_sse4_2() && supports_ht()) { // Nehalem based cpus
1856       FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
1857     }
1858 #ifdef COMPILER2
1859     if (FLAG_IS_DEFAULT(UseFPUForSpilling) && supports_sse4_2()) {
1860       FLAG_SET_DEFAULT(UseFPUForSpilling, true);
1861     }
1862 #endif
1863   }
1864 
1865   if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse4_2()) {
1866 #ifdef COMPILER2
1867     if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
1868       FLAG_SET_DEFAULT(UseFPUForSpilling, true);
1869     }
1870 #endif
1871   }
1872 
1873   // Prefetch settings
1874 
  // Prefetch interval for GC copy/scan == 9 dcache lines.  Derived from
  // 50-warehouse SPECjbb runs on a 2-way 1.8GHz Opteron using a 4GB heap.
  // Tested intervals from 128 to 2048 in increments of 64 == one cache line.
  // 256 bytes (4 dcache lines) was the nearest runner-up to 576.

  // GC copy/scan is disabled if prefetchw isn't supported, because
  // Prefetch::write emits an inlined prefetchw on Linux.
  // Do not use the 3dnow prefetchw instruction.  It isn't supported on em64t.
  // The prefetcht0 instruction used instead works for both amd64 and em64t.
1884 
1885   if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes)) {
1886     FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 576);
1887   }
1888   if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes)) {
1889     FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 576);
1890   }
1891 
1892   if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
1893      (cache_line_size > ContendedPaddingWidth))
1894      ContendedPaddingWidth = cache_line_size;
1895 
1896   // This machine allows unaligned memory accesses
1897   if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
1898     FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
1899   }
1900 
1901 #ifndef PRODUCT
1902   if (log_is_enabled(Info, os, cpu)) {
1903     LogStream ls(Log(os, cpu)::info());
1904     outputStream* log = &ls;
1905     log->print_cr("Logical CPUs per core: %u",
1906                   logical_processors_per_package());
1907     log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
1908     log->print("UseSSE=%d", UseSSE);
1909     if (UseAVX > 0) {
1910       log->print("  UseAVX=%d", UseAVX);
1911     }
1912     if (UseAES) {
1913       log->print("  UseAES=1");
1914     }
1915 #ifdef COMPILER2
1916     if (MaxVectorSize > 0) {
1917       log->print("  MaxVectorSize=%d", (int) MaxVectorSize);
1918     }
1919 #endif
1920     log->cr();
1921     log->print("Allocation");
1922     if (AllocatePrefetchStyle <= 0) {
1923       log->print_cr(": no prefetching");
1924     } else {
1925       log->print(" prefetching: ");
1926       if (AllocatePrefetchInstr == 0) {
1927         log->print("PREFETCHNTA");
1928       } else if (AllocatePrefetchInstr == 1) {
1929         log->print("PREFETCHT0");
1930       } else if (AllocatePrefetchInstr == 2) {
1931         log->print("PREFETCHT2");
1932       } else if (AllocatePrefetchInstr == 3) {
1933         log->print("PREFETCHW");
1934       }
1935       if (AllocatePrefetchLines > 1) {
1936         log->print_cr(" at distance %d, %d lines of %d bytes", AllocatePrefetchDistance, AllocatePrefetchLines, AllocatePrefetchStepSize);
1937       } else {
1938         log->print_cr(" at distance %d, one line of %d bytes", AllocatePrefetchDistance, AllocatePrefetchStepSize);
1939       }
1940     }
1941 
1942     if (PrefetchCopyIntervalInBytes > 0) {
1943       log->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
1944     }
1945     if (PrefetchScanIntervalInBytes > 0) {
1946       log->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
1947     }
1948     if (ContendedPaddingWidth > 0) {
1949       log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
1950     }
1951   }
1952 #endif // !PRODUCT
  if (FLAG_IS_DEFAULT(UseSignumIntrinsic)) {
    FLAG_SET_DEFAULT(UseSignumIntrinsic, true);
  }
  if (FLAG_IS_DEFAULT(UseCopySignIntrinsic)) {
    FLAG_SET_DEFAULT(UseCopySignIntrinsic, true);
  }
1959 }
1960 
1961 void VM_Version::print_platform_virtualization_info(outputStream* st) {
1962   VirtualizationType vrt = VM_Version::get_detected_virtualization();
1963   if (vrt == XenHVM) {
1964     st->print_cr("Xen hardware-assisted virtualization detected");
1965   } else if (vrt == KVM) {
1966     st->print_cr("KVM virtualization detected");
1967   } else if (vrt == VMWare) {
1968     st->print_cr("VMWare virtualization detected");
1969     VirtualizationSupport::print_virtualization_info(st);
1970   } else if (vrt == HyperV) {
1971     st->print_cr("Hyper-V virtualization detected");
1972   } else if (vrt == HyperVRole) {
1973     st->print_cr("Hyper-V role detected");
1974   }
1975 }
1976 
1977 bool VM_Version::compute_has_intel_jcc_erratum() {
1978   if (!is_intel_family_core()) {
1979     // Only Intel CPUs are affected.
1980     return false;
1981   }
1982   // The following table of affected CPUs is based on the following document released by Intel:
1983   // https://www.intel.com/content/dam/support/us/en/documents/processors/mitigations-jump-conditional-code-erratum.pdf
1984   switch (_model) {
1985   case 0x8E:
1986     // 06_8EH | 9 | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Amber Lake Y
1987     // 06_8EH | 9 | 7th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake U
1988     // 06_8EH | 9 | 7th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake U 23e
1989     // 06_8EH | 9 | 7th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake Y
1990     // 06_8EH | A | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake U43e
1991     // 06_8EH | B | 8th Generation Intel(R) Core(TM) Processors based on microarchitecture code name Whiskey Lake U
1992     // 06_8EH | C | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Amber Lake Y
1993     // 06_8EH | C | 10th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Comet Lake U42
1994     // 06_8EH | C | 8th Generation Intel(R) Core(TM) Processors based on microarchitecture code name Whiskey Lake U
1995     return _stepping == 0x9 || _stepping == 0xA || _stepping == 0xB || _stepping == 0xC;
1996   case 0x4E:
1997     // 06_4E  | 3 | 6th Generation Intel(R) Core(TM) Processors based on microarchitecture code name Skylake U
1998     // 06_4E  | 3 | 6th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Skylake U23e
1999     // 06_4E  | 3 | 6th Generation Intel(R) Core(TM) Processors based on microarchitecture code name Skylake Y
2000     return _stepping == 0x3;
2001   case 0x55:
2002     // 06_55H | 4 | Intel(R) Xeon(R) Processor D Family based on microarchitecture code name Skylake D, Bakerville
2003     // 06_55H | 4 | Intel(R) Xeon(R) Scalable Processors based on microarchitecture code name Skylake Server
2004     // 06_55H | 4 | Intel(R) Xeon(R) Processor W Family based on microarchitecture code name Skylake W
2005     // 06_55H | 4 | Intel(R) Core(TM) X-series Processors based on microarchitecture code name Skylake X
2006     // 06_55H | 4 | Intel(R) Xeon(R) Processor E3 v5 Family based on microarchitecture code name Skylake Xeon E3
2007     // 06_55  | 7 | 2nd Generation Intel(R) Xeon(R) Scalable Processors based on microarchitecture code name Cascade Lake (server)
2008     return _stepping == 0x4 || _stepping == 0x7;
2009   case 0x5E:
2010     // 06_5E  | 3 | 6th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Skylake H
2011     // 06_5E  | 3 | 6th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Skylake S
2012     return _stepping == 0x3;
2013   case 0x9E:
2014     // 06_9EH | 9 | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake G
2015     // 06_9EH | 9 | 7th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake H
2016     // 06_9EH | 9 | 7th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake S
2017     // 06_9EH | 9 | Intel(R) Core(TM) X-series Processors based on microarchitecture code name Kaby Lake X
2018     // 06_9EH | 9 | Intel(R) Xeon(R) Processor E3 v6 Family Kaby Lake Xeon E3
2019     // 06_9EH | A | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake H
2020     // 06_9EH | A | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake S
2021     // 06_9EH | A | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake S (6+2) x/KBP
2022     // 06_9EH | A | Intel(R) Xeon(R) Processor E Family based on microarchitecture code name Coffee Lake S (6+2)
2023     // 06_9EH | A | Intel(R) Xeon(R) Processor E Family based on microarchitecture code name Coffee Lake S (4+2)
2024     // 06_9EH | B | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake S (4+2)
2025     // 06_9EH | B | Intel(R) Celeron(R) Processor G Series based on microarchitecture code name Coffee Lake S (4+2)
    // 06_9EH | D | 9th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake H (8+2)
2027     // 06_9EH | D | 9th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Coffee Lake S (8+2)
2028     return _stepping == 0x9 || _stepping == 0xA || _stepping == 0xB || _stepping == 0xD;
2029   case 0xA5:
2030     // Not in Intel documentation.
2031     // 06_A5H |    | 10th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Comet Lake S/H
2032     return true;
2033   case 0xA6:
2034     // 06_A6H | 0  | 10th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Comet Lake U62
2035     return _stepping == 0x0;
2036   case 0xAE:
2037     // 06_AEH | A | 8th Generation Intel(R) Core(TM) Processor Family based on microarchitecture code name Kaby Lake Refresh U (4+2)
2038     return _stepping == 0xA;
2039   default:
    // If we are running on another Intel machine not recognized in the table, we are okay.
2041     return false;
2042   }
2043 }
2044 
2045 // On Xen, the cpuid instruction returns
2046 //  eax / registers[0]: Version of Xen
2047 //  ebx / registers[1]: chars 'XenV'
2048 //  ecx / registers[2]: chars 'MMXe'
2049 //  edx / registers[3]: chars 'nVMM'
2050 //
2051 // On KVM / VMWare / MS Hyper-V, the cpuid instruction returns
2052 //  ebx / registers[1]: chars 'KVMK' / 'VMwa' / 'Micr'
2053 //  ecx / registers[2]: chars 'VMKV' / 'reVM' / 'osof'
2054 //  edx / registers[3]: chars 'M'    / 'ware' / 't Hv'
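//
// For example, on KVM leaf 0x40000000 typically reports ebx = 0x4b4d564b
// ("KVMK"), ecx = 0x564b4d56 ("VMKV") and edx = 0x0000004d ("M"), which
// concatenates to the "KVMKVMKVM" signature matched below.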
2055 //
2056 // more information :
2057 // https://kb.vmware.com/s/article/1009458
2058 //
2059 void VM_Version::check_virtualizations() {
2060   uint32_t registers[4] = {0};
2061   char signature[13] = {0};
2062 
  // Xen cpuid leaves can be found at 0x100-aligned boundaries starting
  // from 0x40000000 up to 0x40010000.
2065   //   https://lists.linuxfoundation.org/pipermail/virtualization/2012-May/019974.html
2066   for (int leaf = 0x40000000; leaf < 0x40010000; leaf += 0x100) {
2067     detect_virt_stub(leaf, registers);
2068     memcpy(signature, &registers[1], 12);
2069 
2070     if (strncmp("VMwareVMware", signature, 12) == 0) {
2071       Abstract_VM_Version::_detected_virtualization = VMWare;
2072       // check for extended metrics from guestlib
2073       VirtualizationSupport::initialize();
2074     } else if (strncmp("Microsoft Hv", signature, 12) == 0) {
2075       Abstract_VM_Version::_detected_virtualization = HyperV;
2076 #ifdef _WINDOWS
2077       // CPUID leaf 0x40000007 is available to the root partition only.
2078       // See Hypervisor Top Level Functional Specification section 2.4.8 for more details.
2079       //   https://github.com/MicrosoftDocs/Virtualization-Documentation/raw/master/tlfs/Hypervisor%20Top%20Level%20Functional%20Specification%20v6.0b.pdf
2080       detect_virt_stub(0x40000007, registers);
2081       if ((registers[0] != 0x0) ||
2082           (registers[1] != 0x0) ||
2083           (registers[2] != 0x0) ||
2084           (registers[3] != 0x0)) {
2085         Abstract_VM_Version::_detected_virtualization = HyperVRole;
2086       }
2087 #endif
2088     } else if (strncmp("KVMKVMKVM", signature, 9) == 0) {
2089       Abstract_VM_Version::_detected_virtualization = KVM;
2090     } else if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
2091       Abstract_VM_Version::_detected_virtualization = XenHVM;
2092     }
2093   }
2094 }
2095 
2096 #ifdef COMPILER2
2097 // Determine if it's running on Cascade Lake using default options.
2098 bool VM_Version::is_default_intel_cascade_lake() {
2099   return FLAG_IS_DEFAULT(UseAVX) &&
2100          FLAG_IS_DEFAULT(MaxVectorSize) &&
2101          UseAVX > 2 &&
2102          is_intel_cascade_lake();
2103 }
2104 #endif
2105 
2106 bool VM_Version::is_intel_cascade_lake() {
2107   return is_intel_skylake() && _stepping >= 5;
2108 }
2109 
2110 bool VM_Version::is_intel_darkmont() {
2111   return is_intel() && is_intel_server_family() && (_model == 0xCC || _model == 0xDD);
2112 }
2113 
// avx3_threshold() sets the threshold at which 64-byte instructions are used
// for implementing the array copy and clear operations.
// Intel platforms that support the serialize instruction have an improved
// implementation of 64-byte load/stores, so the default threshold is set
// to 0 for these platforms.
2119 int VM_Version::avx3_threshold() {
2120   return (is_intel_server_family() &&
2121           supports_serialize() &&
2122           FLAG_IS_DEFAULT(AVX3Threshold)) ? 0 : AVX3Threshold;
2123 }
2124 
2125 void VM_Version::clear_apx_test_state() {
2126   clear_apx_test_state_stub();
2127 }
2128 
2129 static bool _vm_version_initialized = false;
2130 
2131 void VM_Version::initialize() {
2132   ResourceMark rm;
2133 
  // Creating this stub must be the FIRST use of the assembler.
2135   stub_blob = BufferBlob::create("VM_Version stub", stub_size);
2136   if (stub_blob == nullptr) {
2137     vm_exit_during_initialization("Unable to allocate stub for VM_Version");
2138   }
2139   CodeBuffer c(stub_blob);
2140   VM_Version_StubGenerator g(&c);
2141 
2142   get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
2143                                      g.generate_get_cpu_info());
2144   detect_virt_stub = CAST_TO_FN_PTR(detect_virt_stub_t,
2145                                      g.generate_detect_virt());
2146   clear_apx_test_state_stub = CAST_TO_FN_PTR(clear_apx_test_state_t,
2147                                      g.clear_apx_test_state());
2148   getCPUIDBrandString_stub = CAST_TO_FN_PTR(getCPUIDBrandString_stub_t,
2149                                      g.generate_getCPUIDBrandString());
2150   get_processor_features();
2151 
2152   Assembler::precompute_instructions();
2153 
2154   if (VM_Version::supports_hv()) { // Supports hypervisor
2155     check_virtualizations();
2156   }
2157   _vm_version_initialized = true;
2158 }
2159 
2160 typedef enum {
2161    CPU_FAMILY_8086_8088  = 0,
2162    CPU_FAMILY_INTEL_286  = 2,
2163    CPU_FAMILY_INTEL_386  = 3,
2164    CPU_FAMILY_INTEL_486  = 4,
2165    CPU_FAMILY_PENTIUM    = 5,
   CPU_FAMILY_PENTIUMPRO = 6,    // Same family, several models
2167    CPU_FAMILY_PENTIUM_4  = 0xF
2168 } FamilyFlag;
2169 
2170 typedef enum {
2171   RDTSCP_FLAG  = 0x08000000, // bit 27
2172   INTEL64_FLAG = 0x20000000  // bit 29
2173 } _featureExtendedEdxFlag;
2174 
2175 typedef enum {
2176    FPU_FLAG     = 0x00000001,
2177    VME_FLAG     = 0x00000002,
2178    DE_FLAG      = 0x00000004,
2179    PSE_FLAG     = 0x00000008,
2180    TSC_FLAG     = 0x00000010,
2181    MSR_FLAG     = 0x00000020,
2182    PAE_FLAG     = 0x00000040,
2183    MCE_FLAG     = 0x00000080,
2184    CX8_FLAG     = 0x00000100,
2185    APIC_FLAG    = 0x00000200,
2186    SEP_FLAG     = 0x00000800,
2187    MTRR_FLAG    = 0x00001000,
2188    PGE_FLAG     = 0x00002000,
2189    MCA_FLAG     = 0x00004000,
2190    CMOV_FLAG    = 0x00008000,
2191    PAT_FLAG     = 0x00010000,
2192    PSE36_FLAG   = 0x00020000,
2193    PSNUM_FLAG   = 0x00040000,
2194    CLFLUSH_FLAG = 0x00080000,
2195    DTS_FLAG     = 0x00200000,
2196    ACPI_FLAG    = 0x00400000,
2197    MMX_FLAG     = 0x00800000,
2198    FXSR_FLAG    = 0x01000000,
2199    SSE_FLAG     = 0x02000000,
2200    SSE2_FLAG    = 0x04000000,
2201    SS_FLAG      = 0x08000000,
2202    HTT_FLAG     = 0x10000000,
2203    TM_FLAG      = 0x20000000
2204 } FeatureEdxFlag;
2205 
2206 // VM_Version statics
2207 enum {
2208   ExtendedFamilyIdLength_INTEL = 16,
2209   ExtendedFamilyIdLength_AMD   = 24
2210 };
2211 
2212 const size_t VENDOR_LENGTH = 13;
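// 48-byte brand string: 3 CPUID leaves x 4 registers x 4 bytes, plus a NUL.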
2213 const size_t CPU_EBS_MAX_LENGTH = (3 * 4 * 4 + 1);
2214 static char* _cpu_brand_string = nullptr;
2215 static int64_t _max_qualified_cpu_frequency = 0;
2216 
2217 static int _no_of_threads = 0;
2218 static int _no_of_cores = 0;
2219 
2220 const char* const _family_id_intel[ExtendedFamilyIdLength_INTEL] = {
2221   "8086/8088",
2222   "",
2223   "286",
2224   "386",
2225   "486",
2226   "Pentium",
2227   "Pentium Pro",   //or Pentium-M/Woodcrest depending on model
2228   "",
2229   "",
2230   "",
2231   "",
2232   "",
2233   "",
2234   "",
2235   "",
2236   "Pentium 4"
2237 };
2238 
2239 const char* const _family_id_amd[ExtendedFamilyIdLength_AMD] = {
2240   "",
2241   "",
2242   "",
2243   "",
2244   "5x86",
2245   "K5/K6",
2246   "Athlon/AthlonXP",
2247   "",
2248   "",
2249   "",
2250   "",
2251   "",
2252   "",
2253   "",
2254   "",
2255   "Opteron/Athlon64",
2256   "Opteron QC/Phenom",  // Barcelona et.al.
2257   "",
2258   "",
2259   "",
2260   "",
2261   "",
2262   "",
2263   "Zen"
2264 };
2265 // Partially from Intel 64 and IA-32 Architecture Software Developer's Manual,
2266 // September 2013, Vol 3C Table 35-1
2267 const char* const _model_id_pentium_pro[] = {
2268   "",
2269   "Pentium Pro",
2270   "",
2271   "Pentium II model 3",
2272   "",
2273   "Pentium II model 5/Xeon/Celeron",
2274   "Celeron",
2275   "Pentium III/Pentium III Xeon",
2276   "Pentium III/Pentium III Xeon",
2277   "Pentium M model 9",    // Yonah
2278   "Pentium III, model A",
2279   "Pentium III, model B",
2280   "",
2281   "Pentium M model D",    // Dothan
2282   "",
2283   "Core 2",               // 0xf Woodcrest/Conroe/Merom/Kentsfield/Clovertown
2284   "",
2285   "",
2286   "",
2287   "",
2288   "",
2289   "",
2290   "Celeron",              // 0x16 Celeron 65nm
2291   "Core 2",               // 0x17 Penryn / Harpertown
2292   "",
2293   "",
2294   "Core i7",              // 0x1A CPU_MODEL_NEHALEM_EP
2295   "Atom",                 // 0x1B Z5xx series Silverthorn
2296   "",
2297   "Core 2",               // 0x1D Dunnington (6-core)
2298   "Nehalem",              // 0x1E CPU_MODEL_NEHALEM
2299   "",
2300   "",
2301   "",
2302   "",
2303   "",
2304   "",
2305   "Westmere",             // 0x25 CPU_MODEL_WESTMERE
2306   "",
2307   "",
2308   "",                     // 0x28
2309   "",
2310   "Sandy Bridge",         // 0x2a "2nd Generation Intel Core i7, i5, i3"
2311   "",
2312   "Westmere-EP",          // 0x2c CPU_MODEL_WESTMERE_EP
2313   "Sandy Bridge-EP",      // 0x2d CPU_MODEL_SANDYBRIDGE_EP
2314   "Nehalem-EX",           // 0x2e CPU_MODEL_NEHALEM_EX
2315   "Westmere-EX",          // 0x2f CPU_MODEL_WESTMERE_EX
2316   "",
2317   "",
2318   "",
2319   "",
2320   "",
2321   "",
2322   "",
2323   "",
2324   "",
2325   "",
2326   "Ivy Bridge",           // 0x3a
2327   "",
2328   "Haswell",              // 0x3c "4th Generation Intel Core Processor"
2329   "",                     // 0x3d "Next Generation Intel Core Processor"
2330   "Ivy Bridge-EP",        // 0x3e "Next Generation Intel Xeon Processor E7 Family"
2331   "",                     // 0x3f "Future Generation Intel Xeon Processor"
2332   "",
2333   "",
2334   "",
2335   "",
2336   "",
2337   "Haswell",              // 0x45 "4th Generation Intel Core Processor"
2338   "Haswell",              // 0x46 "4th Generation Intel Core Processor"
2339   nullptr
2340 };
2341 
/* Brand ID is for backward compatibility;
 * newer CPUs use the extended brand string. */
2344 const char* const _brand_id[] = {
2345   "",
2346   "Celeron processor",
2347   "Pentium III processor",
2348   "Intel Pentium III Xeon processor",
2349   "",
2350   "",
2351   "",
2352   "",
2353   "Intel Pentium 4 processor",
2354   nullptr
2355 };
2356 
2357 
2358 const char* const _feature_edx_id[] = {
2359   "On-Chip FPU",
2360   "Virtual Mode Extensions",
2361   "Debugging Extensions",
2362   "Page Size Extensions",
2363   "Time Stamp Counter",
2364   "Model Specific Registers",
2365   "Physical Address Extension",
2366   "Machine Check Exceptions",
2367   "CMPXCHG8B Instruction",
2368   "On-Chip APIC",
2369   "",
2370   "Fast System Call",
2371   "Memory Type Range Registers",
2372   "Page Global Enable",
2373   "Machine Check Architecture",
2374   "Conditional Mov Instruction",
2375   "Page Attribute Table",
2376   "36-bit Page Size Extension",
2377   "Processor Serial Number",
2378   "CLFLUSH Instruction",
2379   "",
2380   "Debug Trace Store feature",
2381   "ACPI registers in MSR space",
2382   "Intel Architecture MMX Technology",
2383   "Fast Float Point Save and Restore",
2384   "Streaming SIMD extensions",
2385   "Streaming SIMD extensions 2",
2386   "Self-Snoop",
2387   "Hyper Threading",
2388   "Thermal Monitor",
2389   "",
2390   "Pending Break Enable"
2391 };
2392 
2393 const char* const _feature_extended_edx_id[] = {
2394   "",
2395   "",
2396   "",
2397   "",
2398   "",
2399   "",
2400   "",
2401   "",
2402   "",
2403   "",
2404   "",
2405   "SYSCALL/SYSRET",
2406   "",
2407   "",
2408   "",
2409   "",
2410   "",
2411   "",
2412   "",
2413   "",
2414   "Execute Disable Bit",
2415   "",
2416   "",
2417   "",
2418   "",
2419   "",
2420   "",
2421   "RDTSCP",
2422   "",
2423   "Intel 64 Architecture",
2424   "",
2425   ""
2426 };
2427 
2428 const char* const _feature_ecx_id[] = {
2429   "Streaming SIMD Extensions 3",
2430   "PCLMULQDQ",
2431   "64-bit DS Area",
2432   "MONITOR/MWAIT instructions",
2433   "CPL Qualified Debug Store",
2434   "Virtual Machine Extensions",
2435   "Safer Mode Extensions",
2436   "Enhanced Intel SpeedStep technology",
2437   "Thermal Monitor 2",
2438   "Supplemental Streaming SIMD Extensions 3",
2439   "L1 Context ID",
2440   "",
2441   "Fused Multiply-Add",
2442   "CMPXCHG16B",
2443   "xTPR Update Control",
2444   "Perfmon and Debug Capability",
2445   "",
2446   "Process-context identifiers",
2447   "Direct Cache Access",
2448   "Streaming SIMD extensions 4.1",
2449   "Streaming SIMD extensions 4.2",
2450   "x2APIC",
2451   "MOVBE",
2452   "Popcount instruction",
2453   "TSC-Deadline",
2454   "AESNI",
2455   "XSAVE",
2456   "OSXSAVE",
2457   "AVX",
2458   "F16C",
2459   "RDRAND",
2460   ""
2461 };
2462 
2463 const char* const _feature_extended_ecx_id[] = {
2464   "LAHF/SAHF instruction support",
2465   "Core multi-processor legacy mode",
2466   "",
2467   "",
2468   "",
2469   "Advanced Bit Manipulations: LZCNT",
2470   "SSE4A: MOVNTSS, MOVNTSD, EXTRQ, INSERTQ",
2471   "Misaligned SSE mode",
2472   "",
2473   "",
2474   "",
2475   "",
2476   "",
2477   "",
2478   "",
2479   "",
2480   "",
2481   "",
2482   "",
2483   "",
2484   "",
2485   "",
2486   "",
2487   "",
2488   "",
2489   "",
2490   "",
2491   "",
2492   "",
2493   "",
2494   "",
2495   ""
2496 };
2497 
2498 const char* VM_Version::cpu_model_description(void) {
2499   uint32_t cpu_family = extended_cpu_family();
2500   uint32_t cpu_model = extended_cpu_model();
2501   const char* model = nullptr;
2502 
2503   if (cpu_family == CPU_FAMILY_PENTIUMPRO) {
2504     for (uint32_t i = 0; i <= cpu_model; i++) {
2505       model = _model_id_pentium_pro[i];
2506       if (model == nullptr) {
2507         break;
2508       }
2509     }
2510   }
2511   return model;
2512 }
2513 
2514 const char* VM_Version::cpu_brand_string(void) {
2515   if (_cpu_brand_string == nullptr) {
2516     _cpu_brand_string = NEW_C_HEAP_ARRAY_RETURN_NULL(char, CPU_EBS_MAX_LENGTH, mtInternal);
2517     if (nullptr == _cpu_brand_string) {
2518       return nullptr;
2519     }
2520     int ret_val = cpu_extended_brand_string(_cpu_brand_string, CPU_EBS_MAX_LENGTH);
2521     if (ret_val != OS_OK) {
2522       FREE_C_HEAP_ARRAY(char, _cpu_brand_string);
2523       _cpu_brand_string = nullptr;
2524     }
2525   }
2526   return _cpu_brand_string;
2527 }
2528 
2529 const char* VM_Version::cpu_brand(void) {
2530   const char*  brand  = nullptr;
2531 
2532   if ((_cpuid_info.std_cpuid1_ebx.value & 0xFF) > 0) {
2533     int brand_num = _cpuid_info.std_cpuid1_ebx.value & 0xFF;
2534     brand = _brand_id[0];
2535     for (int i = 0; brand != nullptr && i <= brand_num; i += 1) {
2536       brand = _brand_id[i];
2537     }
2538   }
2539   return brand;
2540 }
2541 
2542 bool VM_Version::cpu_is_em64t(void) {
2543   return ((_cpuid_info.ext_cpuid1_edx.value & INTEL64_FLAG) == INTEL64_FLAG);
2544 }
2545 
2546 bool VM_Version::is_netburst(void) {
2547   return (is_intel() && (extended_cpu_family() == CPU_FAMILY_PENTIUM_4));
2548 }
2549 
2550 bool VM_Version::supports_tscinv_ext(void) {
2551   if (!supports_tscinv_bit()) {
2552     return false;
2553   }
2554 
2555   if (is_intel()) {
2556     return true;
2557   }
2558 
2559   if (is_amd()) {
2560     return !is_amd_Barcelona();
2561   }
2562 
2563   if (is_hygon()) {
2564     return true;
2565   }
2566 
2567   return false;
2568 }
2569 
2570 void VM_Version::resolve_cpu_information_details(void) {
2571 
  // In the future we want to base this information on proper CPU and cache
  // topology enumeration, such as Intel 64 Architecture Processor Topology
  // Enumeration, which supports system CPU and cache topology enumeration
  // using either x2APIC IDs or initial APIC IDs.

  // For now we make only rough estimates that will not necessarily
  // reflect the exact configuration of the system.

  // This is the number of logical hardware threads
  // visible to the operating system.
2583   _no_of_threads = os::processor_count();
2584 
  // Find out the number of threads per CPU package.
2586   int threads_per_package = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus;
2587   if (threads_per_package == 0) {
    // Fallback code to avoid division by zero in subsequent code.
    // CPUID 0Bh (ECX = 1) might return 0 on older AMD processors (EPYC 7763 at least).
2590     threads_per_package = threads_per_core() * cores_per_cpu();
2591   }
2592 
  // Use the number of threads visible to the process to estimate the number of sockets.
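  // For example, 64 OS-visible threads on packages exposing 16 cores x 2
  // threads each (threads_per_package == 32) would yield _no_of_sockets == 2.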
2594   _no_of_sockets = _no_of_threads / threads_per_package;
2595 
  // The process might only see a subset of the total number of threads from a
  // single processor package (virtualization/resource management, for example).
  // If so, just report 1 as the number of packages.
2599   if (0 == _no_of_sockets) {
2600     _no_of_sockets = 1;
2601   }
2602 
2603   // estimate the number of cores
2604   _no_of_cores = cores_per_cpu() * _no_of_sockets;
2605 }
2606 
2607 
2608 const char* VM_Version::cpu_family_description(void) {
2609   int cpu_family_id = extended_cpu_family();
2610   if (is_amd()) {
2611     if (cpu_family_id < ExtendedFamilyIdLength_AMD) {
2612       return _family_id_amd[cpu_family_id];
2613     }
2614   }
2615   if (is_intel()) {
2616     if (cpu_family_id == CPU_FAMILY_PENTIUMPRO) {
2617       return cpu_model_description();
2618     }
2619     if (cpu_family_id < ExtendedFamilyIdLength_INTEL) {
2620       return _family_id_intel[cpu_family_id];
2621     }
2622   }
2623   if (is_hygon()) {
2624     return "Dhyana";
2625   }
2626   return "Unknown x86";
2627 }
2628 
2629 int VM_Version::cpu_type_description(char* const buf, size_t buf_len) {
2630   assert(buf != nullptr, "buffer is null!");
2631   assert(buf_len >= CPU_TYPE_DESC_BUF_SIZE, "buffer len should at least be == CPU_TYPE_DESC_BUF_SIZE!");
2632 
2633   const char* cpu_type = nullptr;
2634   const char* x64 = nullptr;
2635 
2636   if (is_intel()) {
2637     cpu_type = "Intel";
2638     x64 = cpu_is_em64t() ? " Intel64" : "";
2639   } else if (is_amd()) {
2640     cpu_type = "AMD";
2641     x64 = cpu_is_em64t() ? " AMD64" : "";
2642   } else if (is_hygon()) {
2643     cpu_type = "Hygon";
2644     x64 = cpu_is_em64t() ? " AMD64" : "";
2645   } else {
2646     cpu_type = "Unknown x86";
2647     x64 = cpu_is_em64t() ? " x86_64" : "";
2648   }
2649 
2650   jio_snprintf(buf, buf_len, "%s %s%s SSE SSE2%s%s%s%s%s%s%s%s",
2651     cpu_type,
2652     cpu_family_description(),
2653     supports_ht() ? " (HT)" : "",
2654     supports_sse3() ? " SSE3" : "",
2655     supports_ssse3() ? " SSSE3" : "",
2656     supports_sse4_1() ? " SSE4.1" : "",
2657     supports_sse4_2() ? " SSE4.2" : "",
2658     supports_sse4a() ? " SSE4A" : "",
2659     is_netburst() ? " Netburst" : "",
2660     is_intel_family_core() ? " Core" : "",
2661     x64);
2662 
2663   return OS_OK;
2664 }
2665 
2666 int VM_Version::cpu_extended_brand_string(char* const buf, size_t buf_len) {
2667   assert(buf != nullptr, "buffer is null!");
2668   assert(buf_len >= CPU_EBS_MAX_LENGTH, "buffer len should at least be == CPU_EBS_MAX_LENGTH!");
2669   assert(getCPUIDBrandString_stub != nullptr, "not initialized");
2670 
2671   // invoke newly generated asm code to fetch CPU Brand String
2672   getCPUIDBrandString_stub(&_cpuid_info);
2673 
2674   // fetch results into buffer
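  // proc_name_0..proc_name_11 are assumed to hold the 48-byte brand string as
  // returned by CPUID leaves 0x80000002..0x80000004 (eax/ebx/ecx/edx per
  // leaf, 4 bytes each), copied out here in order.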
2675   *((uint32_t*) &buf[0])  = _cpuid_info.proc_name_0;
2676   *((uint32_t*) &buf[4])  = _cpuid_info.proc_name_1;
2677   *((uint32_t*) &buf[8])  = _cpuid_info.proc_name_2;
2678   *((uint32_t*) &buf[12]) = _cpuid_info.proc_name_3;
2679   *((uint32_t*) &buf[16]) = _cpuid_info.proc_name_4;
2680   *((uint32_t*) &buf[20]) = _cpuid_info.proc_name_5;
2681   *((uint32_t*) &buf[24]) = _cpuid_info.proc_name_6;
2682   *((uint32_t*) &buf[28]) = _cpuid_info.proc_name_7;
2683   *((uint32_t*) &buf[32]) = _cpuid_info.proc_name_8;
2684   *((uint32_t*) &buf[36]) = _cpuid_info.proc_name_9;
2685   *((uint32_t*) &buf[40]) = _cpuid_info.proc_name_10;
2686   *((uint32_t*) &buf[44]) = _cpuid_info.proc_name_11;
2687 
2688   return OS_OK;
2689 }
2690 
2691 size_t VM_Version::cpu_write_support_string(char* const buf, size_t buf_len) {
2692   guarantee(buf != nullptr, "buffer is null!");
2693   guarantee(buf_len > 0, "buffer len not enough!");
2694 
2695   unsigned int flag = 0;
2696   unsigned int fi = 0;
2697   size_t       written = 0;
2698   const char*  prefix = "";
2699 
2700 #define WRITE_TO_BUF(string)                                                          \
2701   {                                                                                   \
2702     int res = jio_snprintf(&buf[written], buf_len - written, "%s%s", prefix, string); \
2703     if (res < 0) {                                                                    \
2704       return buf_len - 1;                                                             \
2705     }                                                                                 \
2706     written += res;                                                                   \
2707     if (prefix[0] == '\0') {                                                          \
2708       prefix = ", ";                                                                  \
2709     }                                                                                 \
2710   }
2711 
2712   for (flag = 1, fi = 0; flag <= 0x20000000 ; flag <<= 1, fi++) {
2713     if (flag == HTT_FLAG && (((_cpuid_info.std_cpuid1_ebx.value >> 16) & 0xff) <= 1)) {
2714       continue; /* no hyperthreading */
2715     } else if (flag == SEP_FLAG && (cpu_family() == CPU_FAMILY_PENTIUMPRO && ((_cpuid_info.std_cpuid1_eax.value & 0xff) < 0x33))) {
2716       continue; /* no fast system call */
2717     }
2718     if ((_cpuid_info.std_cpuid1_edx.value & flag) && strlen(_feature_edx_id[fi]) > 0) {
2719       WRITE_TO_BUF(_feature_edx_id[fi]);
2720     }
2721   }
2722 
2723   for (flag = 1, fi = 0; flag <= 0x20000000; flag <<= 1, fi++) {
2724     if ((_cpuid_info.std_cpuid1_ecx.value & flag) && strlen(_feature_ecx_id[fi]) > 0) {
2725       WRITE_TO_BUF(_feature_ecx_id[fi]);
2726     }
2727   }
2728 
2729   for (flag = 1, fi = 0; flag <= 0x20000000 ; flag <<= 1, fi++) {
2730     if ((_cpuid_info.ext_cpuid1_ecx.value & flag) && strlen(_feature_extended_ecx_id[fi]) > 0) {
2731       WRITE_TO_BUF(_feature_extended_ecx_id[fi]);
2732     }
2733   }
2734 
2735   for (flag = 1, fi = 0; flag <= 0x20000000; flag <<= 1, fi++) {
2736     if ((_cpuid_info.ext_cpuid1_edx.value & flag) && strlen(_feature_extended_edx_id[fi]) > 0) {
2737       WRITE_TO_BUF(_feature_extended_edx_id[fi]);
2738     }
2739   }
2740 
  if (supports_tscinv_bit()) {
    WRITE_TO_BUF("Invariant TSC");
  }

  if (supports_hybrid()) {
    WRITE_TO_BUF("Hybrid Architecture");
  }
2748 
2749   return written;
2750 }
2751 
2752 /**
2753  * Write a detailed description of the cpu to a given buffer, including
2754  * feature set.
2755  */
2756 int VM_Version::cpu_detailed_description(char* const buf, size_t buf_len) {
2757   assert(buf != nullptr, "buffer is null!");
2758   assert(buf_len >= CPU_DETAILED_DESC_BUF_SIZE, "buffer len should at least be == CPU_DETAILED_DESC_BUF_SIZE!");
2759 
2760   static const char* unknown = "<unknown>";
2761   char               vendor_id[VENDOR_LENGTH];
2762   const char*        family = nullptr;
2763   const char*        model = nullptr;
2764   const char*        brand = nullptr;
2765   int                outputLen = 0;
2766 
2767   family = cpu_family_description();
2768   if (family == nullptr) {
2769     family = unknown;
2770   }
2771 
2772   model = cpu_model_description();
2773   if (model == nullptr) {
2774     model = unknown;
2775   }
2776 
2777   brand = cpu_brand_string();
2778 
2779   if (brand == nullptr) {
2780     brand = cpu_brand();
2781     if (brand == nullptr) {
2782       brand = unknown;
2783     }
2784   }
2785 
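  // CPUID leaf 0 returns the vendor string in ebx:edx:ecx order (e.g. "Genu",
  // "ineI", "ntel"), which presumably explains the name_0, name_2, name_1
  // copy order below.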
2786   *((uint32_t*) &vendor_id[0]) = _cpuid_info.std_vendor_name_0;
2787   *((uint32_t*) &vendor_id[4]) = _cpuid_info.std_vendor_name_2;
2788   *((uint32_t*) &vendor_id[8]) = _cpuid_info.std_vendor_name_1;
2789   vendor_id[VENDOR_LENGTH-1] = '\0';
2790 
2791   outputLen = jio_snprintf(buf, buf_len, "Brand: %s, Vendor: %s\n"
2792     "Family: %s (0x%x), Model: %s (0x%x), Stepping: 0x%x\n"
2793     "Ext. family: 0x%x, Ext. model: 0x%x, Type: 0x%x, Signature: 0x%8.8x\n"
2794     "Features: ebx: 0x%8.8x, ecx: 0x%8.8x, edx: 0x%8.8x\n"
2795     "Ext. features: eax: 0x%8.8x, ebx: 0x%8.8x, ecx: 0x%8.8x, edx: 0x%8.8x\n"
2796     "Supports: ",
2797     brand,
2798     vendor_id,
2799     family,
2800     extended_cpu_family(),
2801     model,
2802     extended_cpu_model(),
2803     cpu_stepping(),
2804     _cpuid_info.std_cpuid1_eax.bits.ext_family,
2805     _cpuid_info.std_cpuid1_eax.bits.ext_model,
2806     _cpuid_info.std_cpuid1_eax.bits.proc_type,
2807     _cpuid_info.std_cpuid1_eax.value,
2808     _cpuid_info.std_cpuid1_ebx.value,
2809     _cpuid_info.std_cpuid1_ecx.value,
2810     _cpuid_info.std_cpuid1_edx.value,
2811     _cpuid_info.ext_cpuid1_eax,
2812     _cpuid_info.ext_cpuid1_ebx,
2813     _cpuid_info.ext_cpuid1_ecx,
2814     _cpuid_info.ext_cpuid1_edx);
2815 
2816   if (outputLen < 0 || (size_t) outputLen >= buf_len - 1) {
2817     if (buf_len > 0) { buf[buf_len-1] = '\0'; }
2818     return OS_ERR;
2819   }
2820 
2821   cpu_write_support_string(&buf[outputLen], buf_len - outputLen);
2822 
2823   return OS_OK;
2824 }
2825 
2826 
2827 // Fill in Abstract_VM_Version statics
2828 void VM_Version::initialize_cpu_information() {
2829   assert(_vm_version_initialized, "should have initialized VM_Version long ago");
2830   assert(!_initialized, "shouldn't be initialized yet");
2831   resolve_cpu_information_details();
2832 
2833   // initialize cpu_name and cpu_desc
2834   cpu_type_description(_cpu_name, CPU_TYPE_DESC_BUF_SIZE);
2835   cpu_detailed_description(_cpu_desc, CPU_DETAILED_DESC_BUF_SIZE);
2836   _initialized = true;
2837 }
2838 
2839 /**
2840  *  For information about extracting the frequency from the cpu brand string, please see:
2841  *
2842  *    Intel Processor Identification and the CPUID Instruction
2843  *    Application Note 485
2844  *    May 2012
2845  *
2846  * The return value is the frequency in Hz.
2847  */
2848 int64_t VM_Version::max_qualified_cpu_freq_from_brand_string(void) {
2849   const char* const brand_string = cpu_brand_string();
2850   if (brand_string == nullptr) {
2851     return 0;
2852   }
2853   const int64_t MEGA = 1000000;
2854   int64_t multiplier = 0;
2855   int64_t frequency = 0;
2856   uint8_t idx = 0;
2857   // The brand string buffer is at most 48 bytes.
2858   // -2 is to prevent buffer overrun when looking for y in yHz, as z is +2 from y.
2859   for (; idx < 48-2; ++idx) {
2860     // Format is either "x.xxyHz" or "xxxxyHz", where y=M, G, T and x are digits.
2861     // Search brand string for "yHz" where y is M, G, or T.
2862     if (brand_string[idx+1] == 'H' && brand_string[idx+2] == 'z') {
2863       if (brand_string[idx] == 'M') {
2864         multiplier = MEGA;
2865       } else if (brand_string[idx] == 'G') {
2866         multiplier = MEGA * 1000;
2867       } else if (brand_string[idx] == 'T') {
2868         multiplier = MEGA * MEGA;
2869       }
2870       break;
2871     }
2872   }
2873   if (multiplier > 0) {
2874     // Compute frequency (in Hz) from brand string.
2875     if (brand_string[idx-3] == '.') { // if format is "x.xx"
2876       frequency =  (brand_string[idx-4] - '0') * multiplier;
2877       frequency += (brand_string[idx-2] - '0') * multiplier / 10;
2878       frequency += (brand_string[idx-1] - '0') * multiplier / 100;
2879     } else { // format is "xxxx"
2880       frequency =  (brand_string[idx-4] - '0') * 1000;
2881       frequency += (brand_string[idx-3] - '0') * 100;
2882       frequency += (brand_string[idx-2] - '0') * 10;
2883       frequency += (brand_string[idx-1] - '0');
2884       frequency *= multiplier;
2885     }
2886   }
2887   return frequency;
2888 }
2889 
2890 
2891 int64_t VM_Version::maximum_qualified_cpu_frequency(void) {
2892   if (_max_qualified_cpu_frequency == 0) {
2893     _max_qualified_cpu_frequency = max_qualified_cpu_freq_from_brand_string();
2894   }
2895   return _max_qualified_cpu_frequency;
2896 }
2897 
2898 VM_Version::VM_Features VM_Version::CpuidInfo::feature_flags() const {
2899   VM_Features vm_features;
2900   if (std_cpuid1_edx.bits.cmpxchg8 != 0)
2901     vm_features.set_feature(CPU_CX8);
2902   if (std_cpuid1_edx.bits.cmov != 0)
2903     vm_features.set_feature(CPU_CMOV);
2904   if (std_cpuid1_edx.bits.clflush != 0)
2905     vm_features.set_feature(CPU_FLUSH);
  // clflush should always be available on x86_64; if not, we are in real
  // trouble because we rely on it to flush the code cache.
  assert(vm_features.supports_feature(CPU_FLUSH), "clflush should be available");
2910   if (std_cpuid1_edx.bits.fxsr != 0 || (is_amd_family() &&
2911       ext_cpuid1_edx.bits.fxsr != 0))
2912     vm_features.set_feature(CPU_FXSR);
  // The CPUID HT flag is also set for multi-core processors, so derive HT
  // from the actual thread count per core.
2914   if (threads_per_core() > 1)
2915     vm_features.set_feature(CPU_HT);
2916   if (std_cpuid1_edx.bits.mmx != 0 || (is_amd_family() &&
2917       ext_cpuid1_edx.bits.mmx != 0))
2918     vm_features.set_feature(CPU_MMX);
2919   if (std_cpuid1_edx.bits.sse != 0)
2920     vm_features.set_feature(CPU_SSE);
2921   if (std_cpuid1_edx.bits.sse2 != 0)
2922     vm_features.set_feature(CPU_SSE2);
2923   if (std_cpuid1_ecx.bits.sse3 != 0)
2924     vm_features.set_feature(CPU_SSE3);
2925   if (std_cpuid1_ecx.bits.ssse3 != 0)
2926     vm_features.set_feature(CPU_SSSE3);
2927   if (std_cpuid1_ecx.bits.sse4_1 != 0)
2928     vm_features.set_feature(CPU_SSE4_1);
2929   if (std_cpuid1_ecx.bits.sse4_2 != 0)
2930     vm_features.set_feature(CPU_SSE4_2);
2931   if (std_cpuid1_ecx.bits.popcnt != 0)
2932     vm_features.set_feature(CPU_POPCNT);
2933   if (sefsl1_cpuid7_edx.bits.apx_f != 0 &&
2934       xem_xcr0_eax.bits.apx_f != 0 &&
2935       std_cpuid29_ebx.bits.apx_nci_ndd_nf != 0) {
2936     vm_features.set_feature(CPU_APX_F);
2937   }
2938   if (std_cpuid1_ecx.bits.avx != 0 &&
2939       std_cpuid1_ecx.bits.osxsave != 0 &&
2940       xem_xcr0_eax.bits.sse != 0 &&
2941       xem_xcr0_eax.bits.ymm != 0) {
2942     vm_features.set_feature(CPU_AVX);
2943     vm_features.set_feature(CPU_VZEROUPPER);
2944     if (sefsl1_cpuid7_eax.bits.sha512 != 0)
2945       vm_features.set_feature(CPU_SHA512);
2946     if (std_cpuid1_ecx.bits.f16c != 0)
2947       vm_features.set_feature(CPU_F16C);
2948     if (sef_cpuid7_ebx.bits.avx2 != 0) {
2949       vm_features.set_feature(CPU_AVX2);
2950       if (sefsl1_cpuid7_eax.bits.avx_ifma != 0)
2951         vm_features.set_feature(CPU_AVX_IFMA);
2952     }
    if (sef_cpuid7_ecx.bits.gfni != 0)
      vm_features.set_feature(CPU_GFNI);
2955     if (sef_cpuid7_ebx.bits.avx512f != 0 &&
2956         xem_xcr0_eax.bits.opmask != 0 &&
2957         xem_xcr0_eax.bits.zmm512 != 0 &&
2958         xem_xcr0_eax.bits.zmm32 != 0) {
2959       vm_features.set_feature(CPU_AVX512F);
2960       if (sef_cpuid7_ebx.bits.avx512cd != 0)
2961         vm_features.set_feature(CPU_AVX512CD);
2962       if (sef_cpuid7_ebx.bits.avx512dq != 0)
2963         vm_features.set_feature(CPU_AVX512DQ);
2964       if (sef_cpuid7_ebx.bits.avx512ifma != 0)
2965         vm_features.set_feature(CPU_AVX512_IFMA);
2966       if (sef_cpuid7_ebx.bits.avx512pf != 0)
2967         vm_features.set_feature(CPU_AVX512PF);
2968       if (sef_cpuid7_ebx.bits.avx512er != 0)
2969         vm_features.set_feature(CPU_AVX512ER);
2970       if (sef_cpuid7_ebx.bits.avx512bw != 0)
2971         vm_features.set_feature(CPU_AVX512BW);
2972       if (sef_cpuid7_ebx.bits.avx512vl != 0)
2973         vm_features.set_feature(CPU_AVX512VL);
2974       if (sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
2975         vm_features.set_feature(CPU_AVX512_VPOPCNTDQ);
2976       if (sef_cpuid7_ecx.bits.avx512_vpclmulqdq != 0)
2977         vm_features.set_feature(CPU_AVX512_VPCLMULQDQ);
2978       if (sef_cpuid7_ecx.bits.vaes != 0)
2979         vm_features.set_feature(CPU_AVX512_VAES);
2980       if (sef_cpuid7_ecx.bits.avx512_vnni != 0)
2981         vm_features.set_feature(CPU_AVX512_VNNI);
2982       if (sef_cpuid7_ecx.bits.avx512_bitalg != 0)
2983         vm_features.set_feature(CPU_AVX512_BITALG);
2984       if (sef_cpuid7_ecx.bits.avx512_vbmi != 0)
2985         vm_features.set_feature(CPU_AVX512_VBMI);
2986       if (sef_cpuid7_ecx.bits.avx512_vbmi2 != 0)
2987         vm_features.set_feature(CPU_AVX512_VBMI2);
2988     }
2989     if (is_intel()) {
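      // AVX10 with converged ISA version >= 1 and a 512-bit maximum vector
      // length subsumes the AVX512 feature set, so the corresponding AVX512
      // flags are set alongside CPU_AVX10_1 below.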
2990       if (sefsl1_cpuid7_edx.bits.avx10 != 0 &&
          std_cpuid24_ebx.bits.avx10_vlen_512 != 0 &&
2992           std_cpuid24_ebx.bits.avx10_converged_isa_version >= 1 &&
2993           xem_xcr0_eax.bits.opmask != 0 &&
2994           xem_xcr0_eax.bits.zmm512 != 0 &&
2995           xem_xcr0_eax.bits.zmm32 != 0) {
2996         vm_features.set_feature(CPU_AVX10_1);
2997         vm_features.set_feature(CPU_AVX512F);
2998         vm_features.set_feature(CPU_AVX512CD);
2999         vm_features.set_feature(CPU_AVX512DQ);
3000         vm_features.set_feature(CPU_AVX512PF);
3001         vm_features.set_feature(CPU_AVX512ER);
3002         vm_features.set_feature(CPU_AVX512BW);
3003         vm_features.set_feature(CPU_AVX512VL);
3004         vm_features.set_feature(CPU_AVX512_VPOPCNTDQ);
3005         vm_features.set_feature(CPU_AVX512_VPCLMULQDQ);
3006         vm_features.set_feature(CPU_AVX512_VAES);
3007         vm_features.set_feature(CPU_AVX512_VNNI);
3008         vm_features.set_feature(CPU_AVX512_BITALG);
3009         vm_features.set_feature(CPU_AVX512_VBMI);
3010         vm_features.set_feature(CPU_AVX512_VBMI2);
3011         if (std_cpuid24_ebx.bits.avx10_converged_isa_version >= 2) {
3012           vm_features.set_feature(CPU_AVX10_2);
3013         }
3014       }
3015     }
3016   }
3017 
3018   if (std_cpuid1_ecx.bits.hv != 0)
3019     vm_features.set_feature(CPU_HV);
3020   if (sef_cpuid7_ebx.bits.bmi1 != 0)
3021     vm_features.set_feature(CPU_BMI1);
3022   if (std_cpuid1_edx.bits.tsc != 0)
3023     vm_features.set_feature(CPU_TSC);
3024   if (ext_cpuid7_edx.bits.tsc_invariance != 0)
3025     vm_features.set_feature(CPU_TSCINV_BIT);
3026   if (std_cpuid1_ecx.bits.aes != 0)
3027     vm_features.set_feature(CPU_AES);
3028   if (ext_cpuid1_ecx.bits.lzcnt != 0)
3029     vm_features.set_feature(CPU_LZCNT);
3030   if (ext_cpuid1_ecx.bits.prefetchw != 0)
3031     vm_features.set_feature(CPU_3DNOW_PREFETCH);
3032   if (sef_cpuid7_ebx.bits.erms != 0)
3033     vm_features.set_feature(CPU_ERMS);
3034   if (sef_cpuid7_edx.bits.fast_short_rep_mov != 0)
3035     vm_features.set_feature(CPU_FSRM);
3036   if (std_cpuid1_ecx.bits.clmul != 0)
3037     vm_features.set_feature(CPU_CLMUL);
3038   if (sef_cpuid7_ebx.bits.rtm != 0)
3039     vm_features.set_feature(CPU_RTM);
  if (sef_cpuid7_ebx.bits.adx != 0)
    vm_features.set_feature(CPU_ADX);
3042   if (sef_cpuid7_ebx.bits.bmi2 != 0)
3043     vm_features.set_feature(CPU_BMI2);
3044   if (sef_cpuid7_ebx.bits.sha != 0)
3045     vm_features.set_feature(CPU_SHA);
3046   if (std_cpuid1_ecx.bits.fma != 0)
3047     vm_features.set_feature(CPU_FMA);
3048   if (sef_cpuid7_ebx.bits.clflushopt != 0)
3049     vm_features.set_feature(CPU_FLUSHOPT);
3050   if (sef_cpuid7_ebx.bits.clwb != 0)
3051     vm_features.set_feature(CPU_CLWB);
3052   if (ext_cpuid1_edx.bits.rdtscp != 0)
3053     vm_features.set_feature(CPU_RDTSCP);
3054   if (sef_cpuid7_ecx.bits.rdpid != 0)
3055     vm_features.set_feature(CPU_RDPID);
3056 
3057   // AMD|Hygon additional features.
3058   if (is_amd_family()) {
    // PREFETCHW was checked above; the 3DNow! bit (tdnow) also implies
    // prefetch support.
3060     if ((ext_cpuid1_edx.bits.tdnow != 0))
3061       vm_features.set_feature(CPU_3DNOW_PREFETCH);
3062     if (ext_cpuid1_ecx.bits.sse4a != 0)
3063       vm_features.set_feature(CPU_SSE4A);
3064   }
3065 
3066   // Intel additional features.
3067   if (is_intel()) {
3068     if (sef_cpuid7_edx.bits.serialize != 0)
3069       vm_features.set_feature(CPU_SERIALIZE);
3070     if (sef_cpuid7_edx.bits.hybrid != 0)
3071       vm_features.set_feature(CPU_HYBRID);
    if (sef_cpuid7_edx.bits.avx512_fp16 != 0)
3073       vm_features.set_feature(CPU_AVX512_FP16);
3074   }
3075 
3076   // ZX additional features.
3077   if (is_zx()) {
    // We do not know if CLWB is actually supported by ZX, so we cannot trust
    // the common CPUID bit for it; clear it unconditionally. The assert
    // documents the expectation that current ZX parts do report the bit.
    assert(vm_features.supports_feature(CPU_CLWB), "expected ZX to report CLWB");
    vm_features.clear_feature(CPU_CLWB);
3082   }
3083 
3084   // Protection key features.
3085   if (sef_cpuid7_ecx.bits.pku != 0) {
3086     vm_features.set_feature(CPU_PKU);
3087   }
3088   if (sef_cpuid7_ecx.bits.ospke != 0) {
3089     vm_features.set_feature(CPU_OSPKE);
3090   }
3091 
3092   // Control flow enforcement (CET) features.
3093   if (sef_cpuid7_ecx.bits.cet_ss != 0) {
3094     vm_features.set_feature(CPU_CET_SS);
3095   }
3096   if (sef_cpuid7_edx.bits.cet_ibt != 0) {
3097     vm_features.set_feature(CPU_CET_IBT);
3098   }
3099 
3100   // Composite features.
3101   if (supports_tscinv_bit() &&
3102       ((is_amd_family() && !is_amd_Barcelona()) ||
3103        is_intel_tsc_synched_at_init())) {
3104     vm_features.set_feature(CPU_TSCINV);
3105   }
3106   return vm_features;
3107 }
3108 
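// Check whether the OS preserves the full width of the vector registers
// across signal handling: the register snapshots captured by the CPUID stub
// are compared against the known test value (ymm_test_value()) they were
// seeded with.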
3109 bool VM_Version::os_supports_avx_vectors() {
3110   bool retVal = false;
3111   int nreg = 4;
3112   if (supports_evex()) {
    // Verify that the OS saves and restores all bits of the EVEX registers
    // during signal processing.
3115     retVal = true;
3116     for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
3117       if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
3118         retVal = false;
3119         break;
3120       }
3121     }
3122   } else if (supports_avx()) {
    // Verify that the OS saves and restores all bits of the AVX registers
    // during signal processing.
3125     retVal = true;
3126     for (int i = 0; i < 8 * nreg; i++) { // 32 bytes per ymm register
3127       if (_cpuid_info.ymm_save[i] != ymm_test_value()) {
3128         retVal = false;
3129         break;
3130       }
3131     }
    // zmm_save will be set on an EVEX-enabled machine even if we choose AVX code gen.
    if (!retVal) {
      // Verify that the OS saves and restores all bits of the EVEX registers
      // during signal processing.
3136       retVal = true;
3137       for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
3138         if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
3139           retVal = false;
3140           break;
3141         }
3142       }
3143     }
3144   }
3145   return retVal;
3146 }
3147 
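// Same idea for APX: verify that the OS preserves the extended GPRs by
// comparing the saved register state against egpr_test_value().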
3148 bool VM_Version::os_supports_apx_egprs() {
3149   if (!supports_apx_f()) {
3150     return false;
3151   }
3152   if (_cpuid_info.apx_save[0] != egpr_test_value() ||
3153       _cpuid_info.apx_save[1] != egpr_test_value()) {
3154     return false;
3155   }
3156   return true;
3157 }
3158 
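// Physical cores per processor package. Intel and ZX CPUs share the CPUID
// topology leaf 0xB computation: logical processors at the core level
// (sub-leaf 1) divided by logical processors per core (sub-leaf 0).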
3159 uint VM_Version::cores_per_cpu() {
3160   uint result = 1;
  if (is_intel() || is_zx()) {
    bool supports_topology = supports_processor_topology();
    if (supports_topology) {
      result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
               _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
    }
    if (!supports_topology || result == 0) {
      result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
    }
  } else if (is_amd_family()) {
    result = _cpuid_info.ext_cpuid8_ecx.bits.threads_per_cpu + 1;
    if (cpu_family() >= 0x17) { // Zen or later
      result /= _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
    }
  }
3185   return result;
3186 }
3187 
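// Logical processors per physical core. Uses topology leaf 0xB sub-leaf 0
// where available; otherwise falls back to the legacy HT bit together with
// leaf 1 counts (or extended leaf 0x8000001E on family 0x17 and later).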
3188 uint VM_Version::threads_per_core() {
3189   uint result = 1;
  if ((is_intel() || is_zx()) && supports_processor_topology()) {
    result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
3194   } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
3195     if (cpu_family() >= 0x17) {
3196       result = _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
3197     } else {
3198       result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
3199                  cores_per_cpu();
3200     }
3201   }
3202   return (result == 0 ? 1 : result);
3203 }
3204 
3205 uint VM_Version::L1_line_size() {
3206   uint result = 0;
3207   if (is_intel()) {
3208     result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
3209   } else if (is_amd_family()) {
3210     result = _cpuid_info.ext_cpuid5_ecx.bits.L1_line_size;
3211   } else if (is_zx()) {
3212     result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
3213   }
  if (result < 32) // not reported by CPUID?
    result = 32;   // default to 32 bytes on x86
3216   return result;
3217 }
3218 
3219 bool VM_Version::is_intel_tsc_synched_at_init() {
3220   if (is_intel_family_core()) {
3221     uint32_t ext_model = extended_cpu_model();
3222     if (ext_model == CPU_MODEL_NEHALEM_EP     ||
3223         ext_model == CPU_MODEL_WESTMERE_EP    ||
3224         ext_model == CPU_MODEL_SANDYBRIDGE_EP ||
3225         ext_model == CPU_MODEL_IVYBRIDGE_EP) {
      // These EP parts are used in systems with <= 2 sockets and support
      // invariant TSC. The EX versions are usually used in systems with
      // > 2 sockets and likely do not synchronize TSCs at initialization.
      // Code that uses TSC values must be prepared for them to arbitrarily
      // jump forward or backward.
3231       return true;
3232     }
3233   }
3234   return false;
3235 }
3236 
3237 int VM_Version::allocate_prefetch_distance(bool use_watermark_prefetch) {
3238   // Hardware prefetching (distance/size in bytes):
3239   // Pentium 3 -  64 /  32
3240   // Pentium 4 - 256 / 128
3241   // Athlon    -  64 /  32 ????
3242   // Opteron   - 128 /  64 only when 2 sequential cache lines accessed
3243   // Core      - 128 /  64
3244   //
3245   // Software prefetching (distance in bytes / instruction with best score):
3246   // Pentium 3 - 128 / prefetchnta
3247   // Pentium 4 - 512 / prefetchnta
3248   // Athlon    - 128 / prefetchnta
3249   // Opteron   - 256 / prefetchnta
3250   // Core      - 256 / prefetchnta
  // The returned distance is used only when AllocatePrefetchStyle > 0.
3252 
3253   if (is_amd_family()) { // AMD | Hygon
3254     if (supports_sse2()) {
3255       return 256; // Opteron
3256     } else {
3257       return 128; // Athlon
3258     }
3259   } else { // Intel
3260     if (supports_sse3() && is_intel_server_family()) {
3261       if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus
3262         return 192;
3263       } else if (use_watermark_prefetch) { // watermark prefetching on Core
3264         return 384;
3265       }
3266     }
3267     if (supports_sse2()) {
3268       if (is_intel_server_family()) {
3269         return 256; // Pentium M, Core, Core2
3270       } else {
3271         return 512; // Pentium 4
3272       }
3273     } else {
3274       return 128; // Pentium 3 (and all other old CPUs)
3275     }
3276   }
3277 }
3278 
3279 bool VM_Version::is_intrinsic_supported(vmIntrinsicID id) {
3280   assert(id != vmIntrinsics::_none, "must be a VM intrinsic");
3281   switch (id) {
3282   case vmIntrinsics::_floatToFloat16:
3283   case vmIntrinsics::_float16ToFloat:
3284     if (!supports_float16()) {
3285       return false;
3286     }
3287     break;
3288   default:
3289     break;
3290   }
3291   return true;
3292 }
3293 
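// Appends to 'ss' the names of all features set in 'features', separated by
// ", ".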
3294 void VM_Version::insert_features_names(VM_Version::VM_Features features, stringStream& ss) {
3295   int i = 0;
3296   ss.join([&]() {
3297     const char* str = nullptr;
3298     while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
3299       if (features.supports_feature((VM_Version::Feature_Flag)i)) {
3300         str = _features_names[i];
3301       }
3302       i += 1;
3303     }
3304     return str;
3305   }, ", ");
3306 }
3307 
3308 void VM_Version::get_cpu_features_name(void* features_buffer, stringStream& ss) {
3309   VM_Features* features = (VM_Features*)features_buffer;
3310   insert_features_names(*features, ss);
3311 }
3312 
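// Appends to 'ss' the names of features that are set in 'features_buffer' but
// are not supported by this CPU, e.g. to explain why a stored feature set
// (such as one recorded with the AOT code cache) is incompatible.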
3313 void VM_Version::get_missing_features_name(void* features_buffer, stringStream& ss) {
3314   VM_Features* features_to_test = (VM_Features*)features_buffer;
3315   int i = 0;
3316   ss.join([&]() {
3317     const char* str = nullptr;
3318     while ((i < MAX_CPU_FEATURES) && (str == nullptr)) {
3319       Feature_Flag flag = (Feature_Flag)i;
3320       if (features_to_test->supports_feature(flag) && !_features.supports_feature(flag)) {
3321         str = _features_names[i];
3322       }
3323       i += 1;
3324     }
3325     return str;
3326   }, ", ");
3327 }
3328 
3329 int VM_Version::cpu_features_size() {
3330   return sizeof(VM_Features);
3331 }
3332 
3333 void VM_Version::store_cpu_features(void* buf) {
3334   VM_Features copy = _features;
  copy.clear_feature(CPU_HT); // HT does not affect compatibility of the AOT code cache
3336   memcpy(buf, &copy, sizeof(VM_Features));
3337 }
3338 
3339 bool VM_Version::supports_features(void* features_buffer) {
3340   VM_Features* features_to_test = (VM_Features*)features_buffer;
3341   return _features.supports_features(features_to_test);
3342 }