src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp

Old version:

3395     arrays_hashcode_elvload(vtmp[idx], Address(ary1, index, Address::times(elsize), 8 * idx * elsize), eltype);
3396   }
3397   // vresult = vresult * vnext + ary1[index+8*idx:index+8*idx+7];
3398   for (int idx = 0; idx < 4; idx++) {
3399     vpmulld(vresult[idx], vresult[idx], vnext, Assembler::AVX_256bit);
3400     arrays_hashcode_elvcast(vtmp[idx], eltype);
3401     vpaddd(vresult[idx], vresult[idx], vtmp[idx], Assembler::AVX_256bit);
3402   }
3403   // index += 32;
3404   addl(index, 32);
3405   // index < bound;
3406   cmpl(index, bound);
3407   jcc(Assembler::less, UNROLLED_VECTOR_LOOP_BEGIN);
3408   // }
3409 
3410   lea(ary1, Address(ary1, bound, Address::times(elsize)));
3411   subl(cnt1, bound);
3412   // release bound
3413 
3414   // vresult *= IntVector.fromArray(I256, power_of_31_backwards, 1);
3415   for (int idx = 0; idx < 4; idx++) {
3416     lea(tmp2, ExternalAddress(StubRoutines::x86::arrays_hashcode_powers_of_31() + ((8 * idx + 1) * sizeof(jint))));
3417     arrays_hashcode_elvload(vcoef[idx], Address(tmp2, 0), T_INT);
3418     vpmulld(vresult[idx], vresult[idx], vcoef[idx], Assembler::AVX_256bit);
3419   }
3420   // result += vresult.reduceLanes(ADD);
3421   for (int idx = 0; idx < 4; idx++) {
3422     reduceI(Op_AddReductionVI, 256/(sizeof(jint) * 8), result, result, vresult[idx], vtmp[(idx * 2 + 0) % 4], vtmp[(idx * 2 + 1) % 4]);
3423   }
3424 
3425   // } else if (cnt1 < 32) {
3426 
3427   bind(SHORT_UNROLLED_BEGIN);
3428   // int i = 1;
3429   movl(index, 1);
3430   cmpl(index, cnt1);
3431   jcc(Assembler::greaterEqual, SHORT_UNROLLED_LOOP_EXIT);
3432 
3433   // for (; i < cnt1 ; i += 2) {
3434   bind(SHORT_UNROLLED_LOOP_BEGIN);
3435   movl(tmp3, 961);
3436   imull(result, tmp3);
3437   arrays_hashcode_elload(tmp2, Address(ary1, index, Address::times(elsize), -elsize), eltype);
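For orientation: both frames show the heart of the 32-element unrolled main loop of the vectorized hashCode stub (note the addl(index, 32) per iteration across four 8-lane accumulators). The value it computes is the ordinary Java polynomial hash. A minimal scalar sketch of that recurrence, in plain C++ rather than HotSpot code (hash_region and the int32_t element type are illustrative; the real stub dispatches on eltype):

  #include <cstddef>
  #include <cstdint>

  // Scalar model of the hash the stub vectorizes: h = 31*h + a[i] per
  // element, i.e. the sum of a[i] * 31^(n-1-i). Unsigned arithmetic keeps
  // the Java-style wrap-around at 2^32 well defined in C++.
  uint32_t hash_region(const int32_t* a, size_t n) {
    uint32_t h = 0;
    for (size_t i = 0; i < n; i++) {
      h = 31u * h + (uint32_t)a[i];
    }
    return h;
  }

Because the recurrence is linear, the stub can fold 32 elements per iteration: vpmulld scales the four 8-lane accumulators by the loop-invariant multiplier vnext, vpaddd adds the freshly loaded (and widened, via arrays_hashcode_elvcast) block, and after the loop each lane is scaled by its powers-of-31 coefficient before reduceI collapses the lanes into result.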

New version:

3395     arrays_hashcode_elvload(vtmp[idx], Address(ary1, index, Address::times(elsize), 8 * idx * elsize), eltype);
3396   }
3397   // vresult = vresult * vnext + ary1[index+8*idx:index+8*idx+7];
3398   for (int idx = 0; idx < 4; idx++) {
3399     vpmulld(vresult[idx], vresult[idx], vnext, Assembler::AVX_256bit);
3400     arrays_hashcode_elvcast(vtmp[idx], eltype);
3401     vpaddd(vresult[idx], vresult[idx], vtmp[idx], Assembler::AVX_256bit);
3402   }
3403   // index += 32;
3404   addl(index, 32);
3405   // index < bound;
3406   cmpl(index, bound);
3407   jcc(Assembler::less, UNROLLED_VECTOR_LOOP_BEGIN);
3408   // }
3409 
3410   lea(ary1, Address(ary1, bound, Address::times(elsize)));
3411   subl(cnt1, bound);
3412   // release bound
3413 
3414   // vresult *= IntVector.fromArray(I256, power_of_31_backwards, 1);
3415   lea(tmp2, ExternalAddress(StubRoutines::x86::arrays_hashcode_powers_of_31() + (0 * sizeof(jint))));
3416   for (int idx = 0; idx < 4; idx++) {
3417     arrays_hashcode_elvload(vcoef[idx], Address(tmp2, (int)((8 * idx + 1) * sizeof(jint))), T_INT);
3418     vpmulld(vresult[idx], vresult[idx], vcoef[idx], Assembler::AVX_256bit);
3419   }
3420   // result += vresult.reduceLanes(ADD);
3421   for (int idx = 0; idx < 4; idx++) {
3422     reduceI(Op_AddReductionVI, 256/(sizeof(jint) * 8), result, result, vresult[idx], vtmp[(idx * 2 + 0) % 4], vtmp[(idx * 2 + 1) % 4]);
3423   }
3424 
3425   // } else if (cnt1 < 32) {
3426 
3427   bind(SHORT_UNROLLED_BEGIN);
3428   // int i = 1;
3429   movl(index, 1);
3430   cmpl(index, cnt1);
3431   jcc(Assembler::greaterEqual, SHORT_UNROLLED_LOOP_EXIT);
3432 
3433   // for (; i < cnt1 ; i += 2) {
3434   bind(SHORT_UNROLLED_LOOP_BEGIN);
3435   movl(tmp3, 961);
3436   imull(result, tmp3);
3437   arrays_hashcode_elload(tmp2, Address(ary1, index, Address::times(elsize), -elsize), eltype);
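
The actual delta on this page is lines 3415-3417: the old frame issues a lea of the coefficient table's ExternalAddress on every unrolled iteration (four in total), while the new frame materializes the table base into tmp2 once, before the loop, and folds the per-group byte offset (8 * idx + 1) * sizeof(jint) into the Address displacement. A C++ analogy of that hoist (hypothetical names; the table layout is an assumption based on the power_of_31_backwards comment):

  #include <cstdint>

  // Analogy for the change: derive the loop-invariant table base once and
  // reach each 8-lane coefficient group through a constant displacement,
  // instead of re-deriving the address with a fresh lea per iteration.
  uint32_t apply_coefficients(const uint32_t lanes[32]) {
    // Assumed layout: powers of 31 with decreasing exponents, 31^32 first
    // and 31^0 last, so offset 1 yields the coefficient for the first lane.
    uint32_t powers_of_31[33];
    powers_of_31[32] = 1u;
    for (int i = 31; i >= 0; i--) {
      powers_of_31[i] = 31u * powers_of_31[i + 1];
    }

    const uint32_t* base = powers_of_31;  // the single hoisted "lea"
    uint32_t acc = 0;
    for (int idx = 0; idx < 4; idx++) {
      for (int lane = 0; lane < 8; lane++) {
        // base[8*idx + 1 + lane] mirrors the folded Address displacement
        acc += lanes[8 * idx + lane] * base[8 * idx + 1 + lane];
      }
    }
    return acc;
  }

The payoff is one lea instead of four, with the per-group offsets absorbed by x86 addressing modes at no extra cost; the (0 * sizeof(jint)) term in the hoisted lea just spells out that the base of the table is taken.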