src/hotspot/cpu/x86/vm_version_x86.cpp

--- old/src/hotspot/cpu/x86/vm_version_x86.cpp
+++ new/src/hotspot/cpu/x86/vm_version_x86.cpp
@@ -1774,41 +1774,41 @@
     FLAG_SET_DEFAULT(UseFastStosb, false);
   }
 
   // For AMD Processors use XMM/YMM MOVDQU instructions
   // for Object Initialization as default
   if (is_amd() && cpu_family() >= 0x19) {
     if (FLAG_IS_DEFAULT(UseFastStosb)) {
       UseFastStosb = false;
     }
   }
 
 #ifdef COMPILER2
   if (is_intel() && MaxVectorSize > 16) {
     if (FLAG_IS_DEFAULT(UseFastStosb)) {
       UseFastStosb = false;
     }
   }
 #endif
 
   // Use XMM/YMM MOVDQU instruction for Object Initialization
-  if (!UseFastStosb && UseUnalignedLoadStores) {
+  if (UseUnalignedLoadStores) {
     if (FLAG_IS_DEFAULT(UseXMMForObjInit)) {
       UseXMMForObjInit = true;
     }
   } else if (UseXMMForObjInit) {
     warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off.");
     FLAG_SET_DEFAULT(UseXMMForObjInit, false);
   }
 
 #ifdef COMPILER2
   if (FLAG_IS_DEFAULT(AlignVector)) {
     // Modern processors allow misaligned memory operations for vectors.
     AlignVector = !UseUnalignedLoadStores;
   }
 #endif // COMPILER2
 
   if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
     if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) {
       FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
     } else if (!supports_sse() && supports_3dnow_prefetch()) {
       FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
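The only change in this hunk is the guard on line 1794. Under the old guard, UseXMMForObjInit could not be enabled (even with an explicit -XX:+UseXMMForObjInit) while UseFastStosb was in effect; the new guard keys only on UseUnalignedLoadStores, so XMM/YMM MOVDQU object initialization can be defaulted on independently of the stosb path. The following is a minimal standalone C++ sketch of the new resolution logic; the Flag struct and resolve_obj_init_flags helper are illustrative stand-ins for HotSpot's flag machinery (FLAG_IS_DEFAULT / FLAG_SET_DEFAULT), not actual vm_version_x86.cpp code.

#include <cstdio>

// Stand-in for HotSpot's flag machinery: a flag counts as "default"
// until the user sets it on the command line. This is an illustrative
// assumption, not the real FLAG_IS_DEFAULT/FLAG_SET_DEFAULT macros.
struct Flag {
  bool value;
  bool set_by_user;
};

// Mirrors the new condition: UseXMMForObjInit no longer depends on
// UseFastStosb, only on unaligned load/store support.
void resolve_obj_init_flags(Flag& use_xmm_for_obj_init,
                            bool use_unaligned_load_stores) {
  if (use_unaligned_load_stores) {
    if (!use_xmm_for_obj_init.set_by_user) {
      use_xmm_for_obj_init.value = true;   // default the flag on
    }
  } else if (use_xmm_for_obj_init.value) {
    // User forced the flag on unsupported hardware: warn and clear it.
    std::printf("warning: UseXMMForObjInit requires SSE2 and unaligned "
                "load/stores. Feature is switched off.\n");
    use_xmm_for_obj_init.value = false;
  }
}

int main() {
  Flag use_xmm = {false, false};          // not set on the command line
  resolve_obj_init_flags(use_xmm, true);  // CPU supports unaligned ops
  std::printf("UseXMMForObjInit = %s\n", use_xmm.value ? "true" : "false");
  return 0;
}

Compiled and run as-is, the sketch prints "UseXMMForObjInit = true"; passing false for the capability with the flag forced on instead takes the warning path, matching the else branch in the hunk above.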