
src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp

Old version:

3582     arrays_hashcode_elvload(vtmp[idx], Address(ary1, index, Address::times(elsize), 8 * idx * elsize), eltype);
3583   }
3584   // vresult = vresult * vnext + ary1[index+8*idx:index+8*idx+7];
3585   for (int idx = 0; idx < 4; idx++) {
3586     vpmulld(vresult[idx], vresult[idx], vnext, Assembler::AVX_256bit);
3587     arrays_hashcode_elvcast(vtmp[idx], eltype);
3588     vpaddd(vresult[idx], vresult[idx], vtmp[idx], Assembler::AVX_256bit);
3589   }
3590   // index += 32;
3591   addl(index, 32);
3592   // index < bound;
3593   cmpl(index, bound);
3594   jcc(Assembler::less, UNROLLED_VECTOR_LOOP_BEGIN);
3595   // }
3596 
3597   lea(ary1, Address(ary1, bound, Address::times(elsize)));
3598   subl(cnt1, bound);
3599   // release bound
3600 
3601   // vresult *= IntVector.fromArray(I256, power_of_31_backwards, 1);
3602   for (int idx = 0; idx < 4; idx++) {
3603     lea(tmp2, ExternalAddress(StubRoutines::x86::arrays_hashcode_powers_of_31() + ((8 * idx + 1) * sizeof(jint))));
3604     arrays_hashcode_elvload(vcoef[idx], Address(tmp2, 0), T_INT);
3605     vpmulld(vresult[idx], vresult[idx], vcoef[idx], Assembler::AVX_256bit);
3606   }
3607   // result += vresult.reduceLanes(ADD);
3608   for (int idx = 0; idx < 4; idx++) {
3609     reduceI(Op_AddReductionVI, 256/(sizeof(jint) * 8), result, result, vresult[idx], vtmp[(idx * 2 + 0) % 4], vtmp[(idx * 2 + 1) % 4]);
3610   }
3611 
3612   // } else if (cnt1 < 32) {
3613 
3614   bind(SHORT_UNROLLED_BEGIN);
3615   // int i = 1;
3616   movl(index, 1);
3617   cmpl(index, cnt1);
3618   jcc(Assembler::greaterEqual, SHORT_UNROLLED_LOOP_EXIT);
3619 
3620   // for (; i < cnt1 ; i += 2) {
3621   bind(SHORT_UNROLLED_LOOP_BEGIN);
3622   movl(tmp3, 961);
3623   imull(result, tmp3);
3624   arrays_hashcode_elload(tmp2, Address(ary1, index, Address::times(elsize), -elsize), eltype);

New version:

3582     arrays_hashcode_elvload(vtmp[idx], Address(ary1, index, Address::times(elsize), 8 * idx * elsize), eltype);
3583   }
3584   // vresult = vresult * vnext + ary1[index+8*idx:index+8*idx+7];
3585   for (int idx = 0; idx < 4; idx++) {
3586     vpmulld(vresult[idx], vresult[idx], vnext, Assembler::AVX_256bit);
3587     arrays_hashcode_elvcast(vtmp[idx], eltype);
3588     vpaddd(vresult[idx], vresult[idx], vtmp[idx], Assembler::AVX_256bit);
3589   }
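
Taken together, the vpmulld/vpaddd pairs above perform one multiply-accumulate step over 32 elements per iteration (4 vectors of 8 int lanes each). A scalar C++ model of that step, assuming vnext broadcasts the per-iteration multiplier (31^32 for a 32-element stride; its setup is outside this fragment):

    #include <cstdint>

    // One unrolled iteration: vresult = vresult * vnext + 32-element chunk.
    // Vector idx covers elements index+8*idx .. index+8*idx+7 (comment at 3584);
    // the value of vnext is an assumption, not shown in this fragment.
    static void mac_step(uint32_t vresult[4][8], const int32_t* ary1,
                         int index, uint32_t vnext) {
      for (int idx = 0; idx < 4; idx++) {
        for (int lane = 0; lane < 8; lane++) {
          vresult[idx][lane] = vresult[idx][lane] * vnext
                             + (uint32_t)ary1[index + 8 * idx + lane];
        }
      }
    }
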
3590   // index += 32;
3591   addl(index, 32);
3592   // index < bound;
3593   cmpl(index, bound);
3594   jcc(Assembler::less, UNROLLED_VECTOR_LOOP_BEGIN);
3595   // }
3596 
3597   lea(ary1, Address(ary1, bound, Address::times(elsize)));
3598   subl(cnt1, bound);
3599   // release bound
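
The lea/subl pair retires the vectorized prefix: ary1 is advanced past the bound elements just consumed (lea scales the index by elsize) and cnt1 is reduced to the remaining tail of fewer than 32 elements, after which bound is dead. In C terms, a sketch with the register names reused as variables for illustration:

    #include <cstdint>

    // Equivalent pointer arithmetic for lines 3597-3598 (illustrative only).
    static void retire_vector_prefix(const int32_t*& ary1, int& cnt1, int bound) {
      ary1 += bound;   // lea(ary1, Address(ary1, bound, Address::times(elsize)))
      cnt1 -= bound;   // subl(cnt1, bound); tail of < 32 elements remains
    }
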
3600 
3601   // vresult *= IntVector.fromArray(I256, power_of_31_backwards, 1);
3602   lea(tmp2, ExternalAddress(StubRoutines::x86::arrays_hashcode_powers_of_31() + (0 * sizeof(jint))));
3603   for (int idx = 0; idx < 4; idx++) {
3604     arrays_hashcode_elvload(vcoef[idx], Address(tmp2, (int)((8 * idx + 1) * sizeof(jint))), T_INT);

3605     vpmulld(vresult[idx], vresult[idx], vcoef[idx], Assembler::AVX_256bit);
3606   }
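
This loop is the substance of the change: the old version executed the lea of the ExternalAddress inside the loop, re-materializing the table base for every vector, while the new version loads the base once and folds the per-vector offset of (8 * idx + 1) ints into the load's displacement. Functionally nothing changes: lane j of vector idx is still scaled by table entry 8*idx + j + 1. A scalar C++ model, assuming the table holds decreasing powers of 31 as the power_of_31_backwards comment suggests (powers[k] == 31^(32-k)):

    #include <cstdint>

    // powers points at arrays_hashcode_powers_of_31; entries 1..32 scale the
    // 32 lanes. The table layout is an assumption taken from the comment above.
    static void scale_by_powers(uint32_t vresult[4][8], const uint32_t* powers) {
      for (int idx = 0; idx < 4; idx++) {
        const uint32_t* vcoef = powers + (8 * idx + 1);  // constant displacement
        for (int lane = 0; lane < 8; lane++) {
          vresult[idx][lane] *= vcoef[lane];
        }
      }
    }
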
3607   // result += vresult.reduceLanes(ADD);
3608   for (int idx = 0; idx < 4; idx++) {
3609     reduceI(Op_AddReductionVI, 256/(sizeof(jint) * 8), result, result, vresult[idx], vtmp[(idx * 2 + 0) % 4], vtmp[(idx * 2 + 1) % 4]);
3610   }
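
Each reduceI call collapses one 8-lane int vector into a scalar added into result; since integer addition wraps and is commutative, lane order does not matter. Scalar equivalent of the four reductions:

    #include <cstdint>

    // result += vresult.reduceLanes(ADD) for each of the four accumulators.
    static void reduce_add_all(uint32_t& result, const uint32_t vresult[4][8]) {
      for (int idx = 0; idx < 4; idx++) {
        for (int lane = 0; lane < 8; lane++) {
          result += vresult[idx][lane];
        }
      }
    }
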
3611 
3612   // } else if (cnt1 < 32) {
3613 
3614   bind(SHORT_UNROLLED_BEGIN);
3615   // int i = 1;
3616   movl(index, 1);
3617   cmpl(index, cnt1);
3618   jcc(Assembler::greaterEqual, SHORT_UNROLLED_LOOP_EXIT);
3619 
3620   // for (; i < cnt1 ; i += 2) {
3621   bind(SHORT_UNROLLED_LOOP_BEGIN);
3622   movl(tmp3, 961);
3623   imull(result, tmp3);
3624   arrays_hashcode_elload(tmp2, Address(ary1, index, Address::times(elsize), -elsize), eltype);
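
The fragment breaks off after the first element load, but the loop's shape follows from the comment at 3620 and the constant at 3622: 961 == 31 * 31, so each iteration folds two elements into result. A hedged C++ sketch of the whole tail loop, including the odd-count fixup that the loop structure implies but this diff does not show:

    #include <cstdint>

    // result = result*31*31 + ary1[i-1]*31 + ary1[i], two elements per trip.
    static uint32_t short_unrolled_tail(uint32_t result, const int32_t* ary1,
                                        int cnt1) {
      int i = 1;
      for (; i < cnt1; i += 2) {
        result = result * 961 + (uint32_t)ary1[i - 1] * 31 + (uint32_t)ary1[i];
      }
      if (i == cnt1) {   // odd cnt1: one trailing element (inferred, not shown)
        result = result * 31 + (uint32_t)ary1[i - 1];
      }
      return result;
    }
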