867 IRNode.VECTOR_BLEND_F, ">0",
868 IRNode.STORE_VECTOR, ">0"},
869 applyIfCPUFeatureOr = {"avx", "true", "asimd", "true", "rvv", "true"},
870 applyIf = {"UseVectorCmov", "true"})
871 @IR(failOn = {IRNode.STORE_VECTOR},
872 applyIf = {"UseVectorCmov", "false"})
873 @IR(counts = {IRNode.CMOVE_F, ">0", IRNode.CMP_F, ">0"},
874 applyIf = {"UseVectorCmov", "false"},
875 applyIfPlatformOr = {"riscv64", "true", "x64", "true", "aarch64", "true"})
876 private static void testCMoveFNEQforFConst(float[] a, float[] b, float[] c) {
877 for (int i = 0; i < a.length; i++) {
878 c[i] = (a[i] != b[i]) ? 0.1f : -0.1f;
879 }
880 }
881
// Hand-unrolled (stride-2) variant of the float '<' CMove-to-constant test:
// each iteration writes two adjacent lanes so the auto-vectorizer sees an
// already 2-wide pack.
//
// IR rule 1: UseVectorCmov on + vector-blend-capable CPU (avx/asimd/rvv) +
// UseCompactObjectHeaders off => the loop must vectorize into
// LoadVector / VectorMaskCmp / VectorBlend / StoreVector.
882 @Test
883 @IR(counts = {IRNode.LOAD_VECTOR_F, ">0",
884 IRNode.VECTOR_MASK_CMP_F, ">0",
885 IRNode.VECTOR_BLEND_F, ">0",
886 IRNode.STORE_VECTOR, ">0"},
887 applyIfAnd = {"UseCompactObjectHeaders", "false", "UseVectorCmov", "true"},
888 applyIfCPUFeatureOr = {"avx", "true", "asimd", "true", "rvv", "true"})
// IR rule 2: same expectation when AlignVector is off, regardless of the
// object-header layout (alignment then does not gate vectorization — see the
// in-loop comment below).
889 @IR(counts = {IRNode.LOAD_VECTOR_F, ">0",
890 IRNode.VECTOR_MASK_CMP_F, ">0",
891 IRNode.VECTOR_BLEND_F, ">0",
892 IRNode.STORE_VECTOR, ">0"},
893 applyIfAnd = {"AlignVector", "false", "UseVectorCmov", "true"},
894 applyIfCPUFeatureOr = {"avx", "true", "asimd", "true", "rvv", "true"})
// IR rule 3: with UseVectorCmov off, no vector stores may be emitted at all.
895 @IR(failOn = {IRNode.STORE_VECTOR},
896 applyIf = {"UseVectorCmov", "false"})
// IR rule 4: with UseVectorCmov off, on riscv64/x64/aarch64 the ternary must
// still compile to scalar conditional moves (CMoveF fed by CmpF).
897 @IR(counts = {IRNode.CMOVE_F, ">0", IRNode.CMP_F, ">0"},
898 applyIf = {"UseVectorCmov", "false"},
899 applyIfPlatformOr = {"riscv64", "true", "x64", "true", "aarch64", "true"})
900 private static void testCMoveFLTforFConstH2(float[] a, float[] b, float[] c) {
901 for (int i = 0; i < a.length; i+=2) {
902 c[i+0] = (a[i+0] < b[i+0]) ? 0.1f : -0.1f;
903 c[i+1] = (a[i+1] < b[i+1]) ? 0.1f : -0.1f;
904 // With AlignVector, we need 8-byte alignment of vector loads/stores.
905 // UseCompactObjectHeaders=false UseCompactObjectHeaders=true
906 // adr = base + 16 + 8*i -> always adr = base + 12 + 8*i -> never
907 // -> vectorize -> no vectorization
908 }
909 }
910
// Hand-unrolled (stride-2) variant of the float '<=' CMove-to-constant test;
// mirrors testCMoveFLTforFConstH2 with the comparison changed to '<='.
//
// IR rule 1: UseVectorCmov on + vector-blend-capable CPU (avx/asimd/rvv) +
// UseCompactObjectHeaders off => expect the vectorized form
// (LoadVector / VectorMaskCmp / VectorBlend / StoreVector).
911 @Test
912 @IR(counts = {IRNode.LOAD_VECTOR_F, ">0",
913 IRNode.VECTOR_MASK_CMP_F, ">0",
914 IRNode.VECTOR_BLEND_F, ">0",
915 IRNode.STORE_VECTOR, ">0"},
916 applyIfAnd = {"UseCompactObjectHeaders", "false", "UseVectorCmov", "true"},
917 applyIfCPUFeatureOr = {"avx", "true", "asimd", "true", "rvv", "true"})
// IR rule 2: same expectation whenever AlignVector is off (alignment then
// does not gate vectorization — see the in-loop comment below).
918 @IR(counts = {IRNode.LOAD_VECTOR_F, ">0",
919 IRNode.VECTOR_MASK_CMP_F, ">0",
920 IRNode.VECTOR_BLEND_F, ">0",
921 IRNode.STORE_VECTOR, ">0"},
922 applyIfAnd = {"AlignVector", "false", "UseVectorCmov", "true"},
923 applyIfCPUFeatureOr = {"avx", "true", "asimd", "true", "rvv", "true"})
// IR rule 3: with UseVectorCmov off, no vector stores may appear.
924 @IR(failOn = {IRNode.STORE_VECTOR},
925 applyIf = {"UseVectorCmov", "false"})
// IR rule 4: with UseVectorCmov off, on riscv64/x64/aarch64 the ternary must
// still become scalar conditional moves (CMoveF fed by CmpF).
926 @IR(counts = {IRNode.CMOVE_F, ">0", IRNode.CMP_F, ">0"},
927 applyIf = {"UseVectorCmov", "false"},
928 applyIfPlatformOr = {"riscv64", "true", "x64", "true", "aarch64", "true"})
929 private static void testCMoveFLEforFConstH2(float[] a, float[] b, float[] c) {
930 for (int i = 0; i < a.length; i+=2) {
931 c[i+0] = (a[i+0] <= b[i+0]) ? 0.1f : -0.1f;
932 c[i+1] = (a[i+1] <= b[i+1]) ? 0.1f : -0.1f;
933 // With AlignVector, we need 8-byte alignment of vector loads/stores.
934 // UseCompactObjectHeaders=false UseCompactObjectHeaders=true
935 // adr = base + 16 + 8*i -> always adr = base + 12 + 8*i -> never
936 // -> vectorize -> no vectorization
937 }
938 }
939
940 @Test
941 @IR(counts = {IRNode.LOAD_VECTOR_F, "=0",
942 IRNode.VECTOR_MASK_CMP_F, "=0",
943 IRNode.VECTOR_BLEND_F, "=0",
944 IRNode.STORE_VECTOR, "=0"},
945 applyIfCPUFeatureOr = {"avx", "true", "asimd", "true", "rvv", "true"},
946 applyIf = {"UseVectorCmov", "true"})
947 @IR(failOn = {IRNode.STORE_VECTOR},
948 applyIf = {"UseVectorCmov", "false"})
949 @IR(counts = {IRNode.CMOVE_F, ">0", IRNode.CMP_F, ">0"},
950 applyIf = {"UseVectorCmov", "false"},
951 applyIfPlatformOr = {"riscv64", "true", "x64", "true", "aarch64", "true"})
952 private static void testCMoveFYYforFConstH2(float[] a, float[] b, float[] c) {
953 for (int i = 0; i < a.length; i+=2) {
954 c[i+0] = (a[i+0] <= b[i+0]) ? 0.1f : -0.1f;
955 c[i+1] = (a[i+1] < b[i+1]) ? 0.1f : -0.1f;
956 }
|
867 IRNode.VECTOR_BLEND_F, ">0",
868 IRNode.STORE_VECTOR, ">0"},
869 applyIfCPUFeatureOr = {"avx", "true", "asimd", "true", "rvv", "true"},
870 applyIf = {"UseVectorCmov", "true"})
871 @IR(failOn = {IRNode.STORE_VECTOR},
872 applyIf = {"UseVectorCmov", "false"})
873 @IR(counts = {IRNode.CMOVE_F, ">0", IRNode.CMP_F, ">0"},
874 applyIf = {"UseVectorCmov", "false"},
875 applyIfPlatformOr = {"riscv64", "true", "x64", "true", "aarch64", "true"})
876 private static void testCMoveFNEQforFConst(float[] a, float[] b, float[] c) {
877 for (int i = 0; i < a.length; i++) {
878 c[i] = (a[i] != b[i]) ? 0.1f : -0.1f;
879 }
880 }
881
// Hand-unrolled (stride-2) variant of the float '<' CMove-to-constant test:
// each iteration writes two adjacent lanes so the auto-vectorizer sees an
// already 2-wide pack.
// NOTE(review): unlike the other copy of this test in this file, this version
// gates the vectorized rule only on UseVectorCmov (no AlignVector /
// UseCompactObjectHeaders preconditions) — confirm which revision is intended.
//
// IR rule 1: UseVectorCmov on + vector-blend-capable CPU (avx/asimd/rvv)
// => the loop must vectorize into LoadVector / VectorMaskCmp /
// VectorBlend / StoreVector.
882 @Test
883 @IR(counts = {IRNode.LOAD_VECTOR_F, ">0",
884 IRNode.VECTOR_MASK_CMP_F, ">0",
885 IRNode.VECTOR_BLEND_F, ">0",
886 IRNode.STORE_VECTOR, ">0"},
887 applyIf = {"UseVectorCmov", "true"},
888 applyIfCPUFeatureOr = {"avx", "true", "asimd", "true", "rvv", "true"})
// IR rule 2: with UseVectorCmov off, no vector stores may be emitted at all.
889 @IR(failOn = {IRNode.STORE_VECTOR},
890 applyIf = {"UseVectorCmov", "false"})
// IR rule 3: with UseVectorCmov off, on riscv64/x64/aarch64 the ternary must
// still compile to scalar conditional moves (CMoveF fed by CmpF).
891 @IR(counts = {IRNode.CMOVE_F, ">0", IRNode.CMP_F, ">0"},
892 applyIf = {"UseVectorCmov", "false"},
893 applyIfPlatformOr = {"riscv64", "true", "x64", "true", "aarch64", "true"})
894 private static void testCMoveFLTforFConstH2(float[] a, float[] b, float[] c) {
895 for (int i = 0; i < a.length; i+=2) {
896 c[i+0] = (a[i+0] < b[i+0]) ? 0.1f : -0.1f;
897 c[i+1] = (a[i+1] < b[i+1]) ? 0.1f : -0.1f;
898 }
899 }
900
// Hand-unrolled (stride-2) variant of the float '<=' CMove-to-constant test;
// mirrors testCMoveFLTforFConstH2 with the comparison changed to '<='.
// NOTE(review): unlike the other copy of this test in this file, this version
// gates the vectorized rule only on UseVectorCmov (no AlignVector /
// UseCompactObjectHeaders preconditions) — confirm which revision is intended.
//
// IR rule 1: UseVectorCmov on + vector-blend-capable CPU (avx/asimd/rvv)
// => expect the vectorized form (LoadVector / VectorMaskCmp /
// VectorBlend / StoreVector).
901 @Test
902 @IR(counts = {IRNode.LOAD_VECTOR_F, ">0",
903 IRNode.VECTOR_MASK_CMP_F, ">0",
904 IRNode.VECTOR_BLEND_F, ">0",
905 IRNode.STORE_VECTOR, ">0"},
906 applyIf = {"UseVectorCmov", "true"},
907 applyIfCPUFeatureOr = {"avx", "true", "asimd", "true", "rvv", "true"})
// IR rule 2: with UseVectorCmov off, no vector stores may appear.
908 @IR(failOn = {IRNode.STORE_VECTOR},
909 applyIf = {"UseVectorCmov", "false"})
// IR rule 3: with UseVectorCmov off, on riscv64/x64/aarch64 the ternary must
// still become scalar conditional moves (CMoveF fed by CmpF).
910 @IR(counts = {IRNode.CMOVE_F, ">0", IRNode.CMP_F, ">0"},
911 applyIf = {"UseVectorCmov", "false"},
912 applyIfPlatformOr = {"riscv64", "true", "x64", "true", "aarch64", "true"})
913 private static void testCMoveFLEforFConstH2(float[] a, float[] b, float[] c) {
914 for (int i = 0; i < a.length; i+=2) {
915 c[i+0] = (a[i+0] <= b[i+0]) ? 0.1f : -0.1f;
916 c[i+1] = (a[i+1] <= b[i+1]) ? 0.1f : -0.1f;
917 }
918 }
919
920 @Test
921 @IR(counts = {IRNode.LOAD_VECTOR_F, "=0",
922 IRNode.VECTOR_MASK_CMP_F, "=0",
923 IRNode.VECTOR_BLEND_F, "=0",
924 IRNode.STORE_VECTOR, "=0"},
925 applyIfCPUFeatureOr = {"avx", "true", "asimd", "true", "rvv", "true"},
926 applyIf = {"UseVectorCmov", "true"})
927 @IR(failOn = {IRNode.STORE_VECTOR},
928 applyIf = {"UseVectorCmov", "false"})
929 @IR(counts = {IRNode.CMOVE_F, ">0", IRNode.CMP_F, ">0"},
930 applyIf = {"UseVectorCmov", "false"},
931 applyIfPlatformOr = {"riscv64", "true", "x64", "true", "aarch64", "true"})
932 private static void testCMoveFYYforFConstH2(float[] a, float[] b, float[] c) {
933 for (int i = 0; i < a.length; i+=2) {
934 c[i+0] = (a[i+0] <= b[i+0]) ? 0.1f : -0.1f;
935 c[i+1] = (a[i+1] < b[i+1]) ? 0.1f : -0.1f;
936 }
|