src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp

Old:

3754     arrays_hashcode_elvload(vtmp[idx], Address(ary1, index, Address::times(elsize), 8 * idx * elsize), eltype);
3755   }
3756   // vresult = vresult * vnext + ary1[index+8*idx:index+8*idx+7];
3757   for (int idx = 0; idx < 4; idx++) {
3758     vpmulld(vresult[idx], vresult[idx], vnext, Assembler::AVX_256bit);
3759     arrays_hashcode_elvcast(vtmp[idx], eltype);
3760     vpaddd(vresult[idx], vresult[idx], vtmp[idx], Assembler::AVX_256bit);
3761   }
3762   // index += 32;
3763   addl(index, 32);
3764   // index < bound;
3765   cmpl(index, bound);
3766   jcc(Assembler::less, UNROLLED_VECTOR_LOOP_BEGIN);
3767   // }
3768 
3769   lea(ary1, Address(ary1, bound, Address::times(elsize)));
3770   subl(cnt1, bound);
3771   // release bound
3772 
3773   // vresult *= IntVector.fromArray(I256, power_of_31_backwards, 1);
3774   for (int idx = 0; idx < 4; idx++) {
3775     lea(tmp2, ExternalAddress(StubRoutines::x86::arrays_hashcode_powers_of_31() + ((8 * idx + 1) * sizeof(jint))));
3776     arrays_hashcode_elvload(vcoef[idx], Address(tmp2, 0), T_INT);
3777     vpmulld(vresult[idx], vresult[idx], vcoef[idx], Assembler::AVX_256bit);
3778   }
3779   // result += vresult.reduceLanes(ADD);
3780   for (int idx = 0; idx < 4; idx++) {
3781     reduceI(Op_AddReductionVI, 256/(sizeof(jint) * 8), result, result, vresult[idx], vtmp[(idx * 2 + 0) % 4], vtmp[(idx * 2 + 1) % 4]);
3782   }
3783 
3784   // } else if (cnt1 < 32) {
3785 
3786   bind(SHORT_UNROLLED_BEGIN);
3787   // int i = 1;
3788   movl(index, 1);
3789   cmpl(index, cnt1);
3790   jcc(Assembler::greaterEqual, SHORT_UNROLLED_LOOP_EXIT);
3791 
3792   // for (; i < cnt1 ; i += 2) {
3793   bind(SHORT_UNROLLED_LOOP_BEGIN);
3794   movl(tmp3, 961);
3795   imull(result, tmp3);
3796   arrays_hashcode_elload(tmp2, Address(ary1, index, Address::times(elsize), -elsize), eltype);
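
For reference, the Java-style comments in the listing describe the standard 31-polynomial hash h = a[0]*31^(n-1) + ... + a[n-1], evaluated 32 lanes at a time. Below is a minimal scalar sketch of that blocked evaluation, under three assumptions not visible in the excerpt: vnext holds 31^32 (it is broadcast before the lines shown), the powers_of_31 table stores descending powers of 31 with entries 1..32 holding 31^31 down to 31^0 (hence the "backwards" in the comment and the start index of 1), and eltype == T_INT so the widening elvcast is a no-op.

#include <cstddef>
#include <cstdint>

// Scalar model of the vectorized stub above. uint32_t arithmetic gives the
// same mod-2^32 wraparound as Java int without signed-overflow UB.
static int32_t hash_blocked(const int32_t* a, size_t n) {
  uint32_t lane[32] = {0};                  // vresult[0..3], 8 lanes each
  uint32_t p31_32 = 1;                      // vnext (assumed): 31^32 mod 2^32
  for (int i = 0; i < 32; i++) p31_32 *= 31u;

  const size_t bound = n & ~(size_t)31;     // full 32-element chunks
  for (size_t i = 0; i < bound; i += 32) {  // UNROLLED_VECTOR_LOOP
    for (int j = 0; j < 32; j++) {
      lane[j] = lane[j] * p31_32 + (uint32_t)a[i + j];  // vpmulld + vpaddd
    }
  }

  // vresult *= powers_of_31[1..32], i.e. lane j is scaled by 31^(31-j),
  // then result += vresult.reduceLanes(ADD)
  uint32_t h = 0;
  uint32_t pow = 1;
  for (int j = 31; j >= 0; j--) {
    h += lane[j] * pow;
    pow *= 31u;
  }

  // SHORT_UNROLLED tail, two elements per step: 961 == 31 * 31
  size_t i = bound;
  for (; i + 1 < n; i += 2) {
    h = h * 961u + (uint32_t)a[i] * 31u + (uint32_t)a[i + 1];
  }
  if (i < n) {
    h = h * 31u + (uint32_t)a[i];
  }
  return (int32_t)h;
}

This models the control flow only; the real stub widens sub-int element types via arrays_hashcode_elvcast and, judging by the way SHORT_UNROLLED multiplies into result, presumably continues from an incoming result value rather than starting at zero.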

New:

3754     arrays_hashcode_elvload(vtmp[idx], Address(ary1, index, Address::times(elsize), 8 * idx * elsize), eltype);
3755   }
3756   // vresult = vresult * vnext + ary1[index+8*idx:index+8*idx+7];
3757   for (int idx = 0; idx < 4; idx++) {
3758     vpmulld(vresult[idx], vresult[idx], vnext, Assembler::AVX_256bit);
3759     arrays_hashcode_elvcast(vtmp[idx], eltype);
3760     vpaddd(vresult[idx], vresult[idx], vtmp[idx], Assembler::AVX_256bit);
3761   }
3762   // index += 32;
3763   addl(index, 32);
3764   // index < bound;
3765   cmpl(index, bound);
3766   jcc(Assembler::less, UNROLLED_VECTOR_LOOP_BEGIN);
3767   // }
3768 
3769   lea(ary1, Address(ary1, bound, Address::times(elsize)));
3770   subl(cnt1, bound);
3771   // release bound
3772 
3773   // vresult *= IntVector.fromArray(I256, power_of_31_backwards, 1);
3774   lea(tmp2, ExternalAddress(StubRoutines::x86::arrays_hashcode_powers_of_31() + (0 * sizeof(jint))));
3775   for (int idx = 0; idx < 4; idx++) {
3776     arrays_hashcode_elvload(vcoef[idx], Address(tmp2, (int)((8 * idx + 1) * sizeof(jint))), T_INT);
3777     vpmulld(vresult[idx], vresult[idx], vcoef[idx], Assembler::AVX_256bit);
3778   }
3779   // result += vresult.reduceLanes(ADD);
3780   for (int idx = 0; idx < 4; idx++) {
3781     reduceI(Op_AddReductionVI, 256/(sizeof(jint) * 8), result, result, vresult[idx], vtmp[(idx * 2 + 0) % 4], vtmp[(idx * 2 + 1) % 4]);
3782   }
3783 
3784   // } else if (cnt1 < 32) {
3785 
3786   bind(SHORT_UNROLLED_BEGIN);
3787   // int i = 1;
3788   movl(index, 1);
3789   cmpl(index, cnt1);
3790   jcc(Assembler::greaterEqual, SHORT_UNROLLED_LOOP_EXIT);
3791 
3792   // for (; i < cnt1 ; i += 2) {
3793   bind(SHORT_UNROLLED_LOOP_BEGIN);
3794   movl(tmp3, 961);
3795   imull(result, tmp3);
3796   arrays_hashcode_elload(tmp2, Address(ary1, index, Address::times(elsize), -elsize), eltype);
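
The only difference between the old and new listings is the coefficient-load hunk at lines 3774..3776: the old code recomputes the table address with a lea on every unrolled iteration and loads at displacement 0, while the new code hoists a single lea to the table base and folds the (8 * idx + 1) * sizeof(jint) byte offset into the load's displacement (the (int) cast narrows the size_t product to the int displacement Address takes). Both forms read the same 32 table entries; a minimal sketch of the address arithmetic, with hypothetical table contents:

#include <cassert>
#include <cstdint>

int main() {
  int32_t table[33] = {0};                 // stand-in for powers_of_31

  for (int idx = 0; idx < 4; idx++) {
    // old: lea(tmp2, table_base + (8*idx + 1)*sizeof(jint)); load [tmp2 + 0]
    const int32_t* old_addr = table + (8 * idx + 1);

    // new: lea(tmp2, table_base) hoisted above the loop;
    //      load [tmp2 + (8*idx + 1)*sizeof(jint)]
    const int32_t* tmp2 = table;
    const int32_t* new_addr = (const int32_t*)
        ((const char*)tmp2 + (8 * idx + 1) * sizeof(int32_t));

    assert(old_addr == new_addr);          // identical effective addresses
  }
  return 0;
}

Since idx is a C++ loop inside the assembler, both variants emit four loads at stub-generation time; the new form emits one lea instead of four and keeps tmp2 loop-invariant.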