
src/hotspot/cpu/x86/vm_version_x86.cpp


1554     FLAG_SET_DEFAULT(UseFastStosb, false);
1555   }
1556 
1557   // For AMD Processors use XMM/YMM MOVDQU instructions
1558   // for Object Initialization as default
1559   if (is_amd() && cpu_family() >= 0x19) {
1560     if (FLAG_IS_DEFAULT(UseFastStosb)) {
1561       UseFastStosb = false;
1562     }
1563   }
1564 
1565 #ifdef COMPILER2
1566   if (is_intel() && MaxVectorSize > 16) {
1567     if (FLAG_IS_DEFAULT(UseFastStosb)) {
1568       UseFastStosb = false;
1569     }
1570   }
1571 #endif
1572 
1573   // Use XMM/YMM MOVDQU instruction for Object Initialization
1574   if (!UseFastStosb && UseSSE >= 2 && UseUnalignedLoadStores) {
1575     if (FLAG_IS_DEFAULT(UseXMMForObjInit)) {
1576       UseXMMForObjInit = true;
1577     }
1578   } else if (UseXMMForObjInit) {
1579     warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off.");
1580     FLAG_SET_DEFAULT(UseXMMForObjInit, false);
1581   }
1582 
1583 #ifdef COMPILER2
1584   if (FLAG_IS_DEFAULT(AlignVector)) {
1585     // Modern processors allow misaligned memory operations for vectors.
1586     AlignVector = !UseUnalignedLoadStores;
1587   }
1588   if (FLAG_IS_DEFAULT(OptimizeFill)) {
1589     // 8247307: On x86, the auto-vectorized loop array fill code shows
1590     // better performance than the array fill stubs. We should reenable
1591     // this after the x86 stubs get improved.
1592     OptimizeFill = false;
1593   }
1594 #endif // COMPILER2

1554     FLAG_SET_DEFAULT(UseFastStosb, false);
1555   }
1556 
1557   // For AMD Processors use XMM/YMM MOVDQU instructions
1558   // for Object Initialization as default
1559   if (is_amd() && cpu_family() >= 0x19) {
1560     if (FLAG_IS_DEFAULT(UseFastStosb)) {
1561       UseFastStosb = false;
1562     }
1563   }
1564 
1565 #ifdef COMPILER2
1566   if (is_intel() && MaxVectorSize > 16) {
1567     if (FLAG_IS_DEFAULT(UseFastStosb)) {
1568       UseFastStosb = false;
1569     }
1570   }
1571 #endif
1572 
1573   // Use XMM/YMM MOVDQU instruction for Object Initialization
1574   if (UseSSE >= 2 && UseUnalignedLoadStores) {
1575     if (FLAG_IS_DEFAULT(UseXMMForObjInit)) {
1576       UseXMMForObjInit = true;
1577     }
1578   } else if (UseXMMForObjInit) {
1579     warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off.");
1580     FLAG_SET_DEFAULT(UseXMMForObjInit, false);
1581   }
1582 
1583 #ifdef COMPILER2
1584   if (FLAG_IS_DEFAULT(AlignVector)) {
1585     // Modern processors allow misaligned memory operations for vectors.
1586     AlignVector = !UseUnalignedLoadStores;
1587   }
1588   if (FLAG_IS_DEFAULT(OptimizeFill)) {
1589     // 8247307: On x86, the auto-vectorized loop array fill code shows
1590     // better performance than the array fill stubs. We should reenable
1591     // this after the x86 stubs get improved.
1592     OptimizeFill = false;
1593   }
1594 #endif // COMPILER2
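
The flag interplay in this hunk can be read as a small decision procedure: ERMS-capable CPUs default UseFastStosb on, the AMD family >= 0x19 case and the Intel MaxVectorSize > 16 case turn it back off by default, and UseXMMForObjInit then defaults on when SSE2 and unaligned load/stores are usable. The standalone sketch below models that flow. It is illustrative only, not HotSpot code: CpuInfo, Flags, pick_defaults and the hard-coded feature values are made-up names, the ERMS-based initial default is an assumption read off the FLAG_SET_DEFAULT(UseFastStosb, false) fallback at line 1554, and the final condition follows the first listing above (the one that still includes !UseFastStosb); the second listing drops that term.

// Illustrative sketch only -- not HotSpot code. CpuInfo, Flags and
// pick_defaults are hypothetical names used to model the checks above.
#include <cstdio>

struct CpuInfo {
  bool     is_intel;
  bool     is_amd;
  unsigned family;            // CPUID family, e.g. 0x19 for recent AMD parts
  bool     erms;              // "fast string" rep stosb support (assumed to be
                              // what sets the initial UseFastStosb default)
  int      use_sse;           // stands in for UseSSE
  bool     unaligned_ld_st;   // stands in for UseUnalignedLoadStores
  int      max_vector_size;   // stands in for MaxVectorSize (COMPILER2 only)
};

struct Flags {
  bool use_fast_stosb;
  bool use_xmm_for_obj_init;
};

static Flags pick_defaults(const CpuInfo& cpu) {
  Flags f{};
  f.use_fast_stosb = cpu.erms;                    // assumed initial default
  if (cpu.is_amd && cpu.family >= 0x19) {
    f.use_fast_stosb = false;                     // prefer XMM/YMM MOVDQU here
  }
  if (cpu.is_intel && cpu.max_vector_size > 16) {
    f.use_fast_stosb = false;                     // COMPILER2-only Intel case
  }
  // Follows the first listing above; the second listing omits the
  // !use_fast_stosb term from this condition.
  f.use_xmm_for_obj_init =
      !f.use_fast_stosb && cpu.use_sse >= 2 && cpu.unaligned_ld_st;
  return f;
}

int main() {
  // Example: an AMD family 0x19 CPU with 32-byte vectors.
  CpuInfo cpu{ /*is_intel=*/false, /*is_amd=*/true, /*family=*/0x19,
               /*erms=*/true, /*use_sse=*/4, /*unaligned_ld_st=*/true,
               /*max_vector_size=*/32 };
  Flags f = pick_defaults(cpu);
  std::printf("UseFastStosb=%d UseXMMForObjInit=%d\n",
              f.use_fast_stosb, f.use_xmm_for_obj_init);
  return 0;
}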