    @Run(test = {"test1"})
    @Warmup(0)
    public void runTests() throws Exception {
        int[] data = new int[RANGE];

        init(data);
        for (int i = 0; i < ITER; i++) {
            long r1 = test1(data, i);
            long r2 = ref1(data, i);
            if (r1 != r2) {
                throw new RuntimeException("Wrong result test1: " + r1 + " != " + r2);
            }
        }
    }

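    // Descriptive note (not from the original test): the IR rules below expect the loop to
    // vectorize into an int vector load, an int-to-long vector cast, and a vectorized OR
    // reduction. Because the chain mixes int and long operations, the expected vector size
    // is capped at min(max_int, max_long) elements.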
    @Test
    @IR(counts = {IRNode.LOAD_VECTOR_I, IRNode.VECTOR_SIZE + "min(max_int, max_long)", "> 0",
                  IRNode.VECTOR_CAST_I2L, IRNode.VECTOR_SIZE + "min(max_int, max_long)", "> 0",
                  IRNode.OR_REDUCTION_V, "> 0",},
        applyIfOr = {"AlignVector", "false", "UseCompactObjectHeaders", "false"},
        applyIfPlatform = {"64-bit", "true"},
        applyIfCPUFeature = {"avx2", "true"})
    @IR(counts = {IRNode.LOAD_VECTOR_I, IRNode.VECTOR_SIZE + "min(max_int, max_long)", "> 0",
                  IRNode.VECTOR_CAST_I2L, IRNode.VECTOR_SIZE + "min(max_int, max_long)", "> 0",
                  IRNode.OR_REDUCTION_V, "> 0",},
        applyIfAnd = {"AlignVector", "false", "MaxVectorSize", ">=32"},
        applyIfPlatform = {"riscv64", "true"},
        applyIfCPUFeature = {"rvv", "true"})
    @IR(counts = {IRNode.LOAD_VECTOR_I, IRNode.VECTOR_SIZE + "min(max_int, max_long)", "> 0",
                  IRNode.VECTOR_CAST_I2L, IRNode.VECTOR_SIZE + "min(max_int, max_long)", "> 0",
                  IRNode.OR_REDUCTION_V, "> 0",},
        applyIfAnd = {"UseCompactObjectHeaders", "false", "MaxVectorSize", ">=32"},
        applyIfPlatform = {"riscv64", "true"},
        applyIfCPUFeature = {"rvv", "true"})
    static long test1(int[] data, long sum) {
        for (int i = 0; i < data.length; i += 2) {
            // Mixing int and long ops means we only end up allowing half of the int
            // loads in one pack, and we have two int packs. The first pack has one
            // of its pairs missing because of the store, which creates a dependency.
            // The first pack is rejected and left as scalar; the second pack is
            // vectorized successfully. That gives us a mixed scalar/vector reduction
            // chain. This way it is possible that a vector reduction has a scalar
            // reduction as input, which is neither a phi nor a vector reduction.
            // In such a case, we must bail out of the optimization in
            // PhaseIdealLoop::move_unordered_reduction_out_of_loop.
            int v = data[i];   // int read
            data[0] = 0;       // ruin the first pack
            sum |= v;          // long reduction (and implicit cast from int to long)

            // This example used to rely on the fact that reductions were ignored in
            // SuperWord::unrolling_analysis, and hence the largest data type in the loop was
            // the ints. The loop was therefore unrolled by twice as much, i.e. the longs were
            // unrolled for twice their vector length, which resulted in twice as many packs.
            // Because of the store "data[0] = 0", the first packs were destroyed, since they
            // did not have a power-of-2 size.
            // Now we no longer ignore reductions, so we unroll half as much before SuperWord.
            // That would leave only one pack per operation, the store would ruin it, and we
            // would get no vectorization. We therefore ensure there are again 2 packs per
            // operation with a 2x hand unroll.
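            // For reference, a stride-1 sketch (not part of the original test) of this
            // reduction without the hand unroll; it would get only one pack per operation,
            // which the store above then ruins:
            //
            //   for (int j = 0; j < data.length; j++) {
            //       int x = data[j];
            //       data[0] = 0;
            //       sum |= x;
            //   }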
            int v2 = data[i + 1];
            sum |= v2;

            // With AlignVector, we need 8-byte alignment of vector loads/stores.
            // UseCompactObjectHeaders=false           UseCompactObjectHeaders=true
            // adr = base + 16 + 8*i  ->  always       adr = base + 12 + 8*i  ->  never
            // -> vectorize                            -> no vectorization
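            // (Clarifying note: an int[] element starts at offset 16 from the object base by
            // default, but at offset 12 with compact object headers, so the 8-byte chunk
            // covering the pair data[i], data[i+1] is 8-byte aligned only in the former case.)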
        }
        return sum;
    }

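    // Scalar reference version of test1: the same OR reduction with stride 1,
    // used by runTests() to verify the result of the compiled test1.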
    static long ref1(int[] data, long sum) {
        for (int i = 0; i < data.length; i++) {
            int v = data[i];
            data[0] = 0;
            sum |= v;
        }
        return sum;
    }

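    // Fill the array with the values 1..RANGE so every iteration contributes bits
    // to the OR reduction.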
    static void init(int[] data) {
        for (int i = 0; i < RANGE; i++) {
            data[i] = i + 1;
        }
    }
}