
src/hotspot/cpu/x86/stubGenerator_x86_32.cpp (new version)

 872     if (entry != NULL) {
 873       *entry = __ pc(); // Entry point from conjoint arraycopy stub.
 874       BLOCK_COMMENT("Entry:");
 875     }
 876 
 877     if (t == T_OBJECT) {
 878       __ testl(count, count);
 879       __ jcc(Assembler::zero, L_0_count);
 880     }
 881 
 882     DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
 883     if (dest_uninitialized) {
 884       decorators |= IS_DEST_UNINITIALIZED;
 885     }
 886     if (aligned) {
 887       decorators |= ARRAYCOPY_ALIGNED;
 888     }
 889 
 890     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
 891     bs->arraycopy_prologue(_masm, decorators, t, from, to, count);
 892     {
 893       bool add_entry = (t != T_OBJECT && (!aligned || t == T_INT));
 894       // UnsafeCopyMemory page error: continue after ucm
 895       UnsafeCopyMemoryMark ucmm(this, add_entry, true);
 896       __ subptr(to, from); // to --> to_from
 897       __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
 898       __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
 899       if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
 900         // align source address at a 4-byte boundary
 901         if (t == T_BYTE) {
 902           // One-byte misalignment happens only for byte arrays
 903           __ testl(from, 1);
 904           __ jccb(Assembler::zero, L_skip_align1);
 905           __ movb(rax, Address(from, 0));
 906           __ movb(Address(from, to_from, Address::times_1, 0), rax);
 907           __ increment(from);
 908           __ decrement(count);
 909         __ BIND(L_skip_align1);
 910         }
 911         // Two-byte misalignment happens only for byte and short (char) arrays
 912         __ testl(from, 2);
 913         __ jccb(Assembler::zero, L_skip_align2);
 914         __ movw(rax, Address(from, 0));
 915         __ movw(Address(from, to_from, Address::times_1, 0), rax);
 916         __ addptr(from, 2);
 917         __ subl(count, 1<<(shift-1));
 918       __ BIND(L_skip_align2);
 919       }
 920       if (!VM_Version::supports_mmx()) {
 921         __ mov(rax, count);      // save 'count'
 922       __ shrl(count, shift); // dword count
 923         __ addptr(to_from, from);// restore 'to'
 924         __ rep_mov();
 925         __ subptr(to_from, from);// restore 'to_from'
 926         __ mov(count, rax);      // restore 'count'
 927         __ jmpb(L_copy_2_bytes); // all dwords were copied
 928       } else {
 929         if (!UseUnalignedLoadStores) {
 930           // align to 8 bytes; we know we are 4-byte aligned to start
 931           __ testptr(from, 4);
 932           __ jccb(Assembler::zero, L_copy_64_bytes);
 933           __ movl(rax, Address(from, 0));
 934           __ movl(Address(from, to_from, Address::times_1, 0), rax);
 935           __ addptr(from, 4);
 936           __ subl(count, 1<<shift);
 937         }
 938       __ BIND(L_copy_64_bytes);
 939         __ mov(rax, count);
 940         __ shrl(rax, shift+1);  // 8-byte chunk count
 941         //
 942         // Copy 8-byte chunks through XMM or MMX registers, 8 per iteration of the loop
 943         //
 944         if (UseXMMForArrayCopy) {
 945           xmm_copy_forward(from, to_from, rax);
 946         } else {
 947           mmx_copy_forward(from, to_from, rax);
 948         }
 949       }
 950       // copy trailing dword
 951     __ BIND(L_copy_4_bytes);
 952       __ testl(count, 1<<shift);
 953       __ jccb(Assembler::zero, L_copy_2_bytes);
 954       __ movl(rax, Address(from, 0));
 955       __ movl(Address(from, to_from, Address::times_1, 0), rax);
 956       if (t == T_BYTE || t == T_SHORT) {
 957         __ addptr(from, 4);
 958       __ BIND(L_copy_2_bytes);
 959         // copy trailing word
 960         __ testl(count, 1<<(shift-1));
 961         __ jccb(Assembler::zero, L_copy_byte);
 962         __ movw(rax, Address(from, 0));
 963         __ movw(Address(from, to_from, Address::times_1, 0), rax);
 964         if (t == T_BYTE) {
 965           __ addptr(from, 2);
 966         __ BIND(L_copy_byte);
 967           // copy trailing byte
 968           __ testl(count, 1);
 969           __ jccb(Assembler::zero, L_exit);
 970           __ movb(rax, Address(from, 0));
 971           __ movb(Address(from, to_from, Address::times_1, 0), rax);
 972         __ BIND(L_exit);
 973         } else {
 974         __ BIND(L_copy_byte);
 975         }
 976       } else {
 977       __ BIND(L_copy_2_bytes);
 978       }
 979     }
 980 
 981     if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
 982       __ emms();
 983     }
 984     __ movl(count, Address(rsp, 12+12)); // reread 'count'
 985     bs->arraycopy_epilogue(_masm, decorators, t, from, to, count);
 986 
 987     if (t == T_OBJECT) {
 988     __ BIND(L_0_count);
 989     }
 990     inc_copy_counter_np(t);
 991     __ pop(rdi);
 992     __ pop(rsi);
 993     __ leave(); // required for proper stackwalking of RuntimeStub frame
 994     __ vzeroupper();
 995     __ xorptr(rax, rax); // return 0
 996     __ ret(0);
 997     return start;
 998   }
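
A note on the addressing pattern used throughout the forward-copy stub above:
__ subptr(to, from) replaces 'to' with the delta (to - from), and every store
then uses Address(from, to_from, times_1), i.e. from + (to - from) == to, so
one register increment advances the source and destination cursors at once.
A minimal C++ sketch of the same trick, outside HotSpot (illustrative names,
not HotSpot code):

    #include <cstddef>
    #include <cstdint>

    // Forward byte copy using a source/destination delta, mirroring the
    // stub's Address(from, to_from, times_1) operands.
    static void forward_copy_bytes(uint8_t* from, uint8_t* to, size_t count) {
      ptrdiff_t to_from = to - from;   // __ subptr(to, from): keep only the delta
      uint8_t* end = from + count;
      while (from < end) {
        *(from + to_from) = *from;     // store to from + (to - from) == to
        ++from;                        // one increment advances both streams
      }
    }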
 999 
1000 
1001   address generate_fill(BasicType t, bool aligned, const char *name) {
1002     __ align(CodeEntryAlignment);
1003     StubCodeMark mark(this, "StubRoutines", name);


1069     __ jump_cc(Assembler::belowEqual, nooverlap);
1070     __ cmpptr(dst, end);
1071     __ jump_cc(Assembler::aboveEqual, nooverlap);
1072 
1073     if (t == T_OBJECT) {
1074       __ testl(count, count);
1075       __ jcc(Assembler::zero, L_0_count);
1076     }
1077 
1078     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1079     if (dest_uninitialized) {
1080       decorators |= IS_DEST_UNINITIALIZED;
1081     }
1082     if (aligned) {
1083       decorators |= ARRAYCOPY_ALIGNED;
1084     }
1085 
1086     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
1087     bs->arraycopy_prologue(_masm, decorators, t, from, to, count);
1088 
1089     {
1090       bool add_entry = (t != T_OBJECT && (!aligned || t == T_INT));
1091       // UnsafeCopyMemory page error: continue after ucm
1092       UnsafeCopyMemoryMark ucmm(this, add_entry, true);
1093       // copy from high to low
1094       __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
1095       __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
1096       if (t == T_BYTE || t == T_SHORT) {
1097         // Align the end of the destination array at a 4-byte boundary
1098         __ lea(end, Address(dst, count, sf, 0));
1099         if (t == T_BYTE) {
1100           // One-byte misalignment happens only for byte arrays
1101           __ testl(end, 1);
1102           __ jccb(Assembler::zero, L_skip_align1);
1103           __ decrement(count);
1104           __ movb(rdx, Address(from, count, sf, 0));
1105           __ movb(Address(to, count, sf, 0), rdx);
1106         __ BIND(L_skip_align1);
1107         }
1108         // Two-byte misalignment happens only for byte and short (char) arrays
1109         __ testl(end, 2);
1110         __ jccb(Assembler::zero, L_skip_align2);
1111         __ subptr(count, 1<<(shift-1));
1112         __ movw(rdx, Address(from, count, sf, 0));
1113         __ movw(Address(to, count, sf, 0), rdx);
1114       __ BIND(L_skip_align2);
1115         __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
1116         __ jcc(Assembler::below, L_copy_4_bytes);
1117       }
1118 
1119       if (!VM_Version::supports_mmx()) {
1120         __ std();
1121         __ mov(rax, count); // Save 'count'
1122         __ mov(rdx, to);    // Save 'to'
1123         __ lea(rsi, Address(from, count, sf, -4));
1124         __ lea(rdi, Address(to  , count, sf, -4));
1125         __ shrptr(count, shift); // dword count
1126         __ rep_mov();
1127         __ cld();
1128         __ mov(count, rax); // restore 'count'
1129         __ andl(count, (1<<shift)-1);      // mask the number of remaining elements
1130         __ movptr(from, Address(rsp, 12+4)); // reread 'from'
1131         __ mov(to, rdx);   // restore 'to'
1132         __ jmpb(L_copy_2_bytes); // all dwords were copied
1133       } else {
1134         // Align the end of the array to 8 bytes; it is already 4-byte aligned.
1135         __ testptr(end, 4);
1136         __ jccb(Assembler::zero, L_copy_8_bytes);
1137         __ subl(count, 1<<shift);
1138         __ movl(rdx, Address(from, count, sf, 0));
1139         __ movl(Address(to, count, sf, 0), rdx);
1140         __ jmpb(L_copy_8_bytes);
1141 
1142         __ align(OptoLoopAlignment);
1143         // Move 8 bytes
1144       __ BIND(L_copy_8_bytes_loop);
1145         if (UseXMMForArrayCopy) {
1146           __ movq(xmm0, Address(from, count, sf, 0));
1147           __ movq(Address(to, count, sf, 0), xmm0);
1148         } else {
1149           __ movq(mmx0, Address(from, count, sf, 0));
1150           __ movq(Address(to, count, sf, 0), mmx0);
1151         }
1152       __ BIND(L_copy_8_bytes);
1153         __ subl(count, 2<<shift);
1154         __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1155         __ addl(count, 2<<shift);
1156         if (!UseXMMForArrayCopy) {
1157           __ emms();
1158         }
1159       }
1160     __ BIND(L_copy_4_bytes);
1161       // copy prefix dword
1162       __ testl(count, 1<<shift);
1163       __ jccb(Assembler::zero, L_copy_2_bytes);
1164       __ movl(rdx, Address(from, count, sf, -4));
1165       __ movl(Address(to, count, sf, -4), rdx);
1166 
1167       if (t == T_BYTE || t == T_SHORT) {
1168           __ subl(count, (1<<shift));
1169         __ BIND(L_copy_2_bytes);
1170           // copy prefix word
1171           __ testl(count, 1<<(shift-1));
1172           __ jccb(Assembler::zero, L_copy_byte);
1173           __ movw(rdx, Address(from, count, sf, -2));
1174           __ movw(Address(to, count, sf, -2), rdx);
1175           if (t == T_BYTE) {
1176             __ subl(count, 1<<(shift-1));
1177           __ BIND(L_copy_byte);
1178             // copy prefix byte
1179             __ testl(count, 1);
1180             __ jccb(Assembler::zero, L_exit);
1181             __ movb(rdx, Address(from, 0));
1182             __ movb(Address(to, 0), rdx);
1183           __ BIND(L_exit);
1184           } else {
1185           __ BIND(L_copy_byte);
1186           }
1187       } else {
1188       __ BIND(L_copy_2_bytes);
1189       }
1190     }
1191 
1192     if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
1193       __ emms();
1194     }
1195     __ movl2ptr(count, Address(rsp, 12+12)); // reread count
1196     bs->arraycopy_epilogue(_masm, decorators, t, from, to, count);
1197 
1198     if (t == T_OBJECT) {
1199     __ BIND(L_0_count);
1200     }
1201     inc_copy_counter_np(t);
1202     __ pop(rdi);
1203     __ pop(rsi);
1204     __ leave(); // required for proper stackwalking of RuntimeStub frame
1205     __ xorptr(rax, rax); // return 0
1206     __ ret(0);
1207     return start;
1208   }
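
For orientation, the conjoint stub's control flow reduces to the shape below
(a sketch only, not HotSpot code): delegate to the disjoint forward copy when
the regions cannot clobber each other, otherwise copy from the highest element
down so every source element is read before it can be overwritten.

    #include <cstddef>
    #include <cstdint>

    // Sketch of the overlap test and the high-to-low fallback.
    static void conjoint_copy_bytes(uint8_t* from, uint8_t* to, size_t count) {
      if (to <= from || to >= from + count) {
        // No destructive overlap: the stub jumps to nooverlap_target here
        // (the disjoint, forward-copying stub).
        for (size_t i = 0; i < count; i++) to[i] = from[i];
      } else {
        // Destination overlaps the tail of the source: copy from high to low.
        for (size_t i = count; i > 0; i--) to[i - 1] = from[i - 1];
      }
    }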
1209 
1210 
1211   address generate_disjoint_long_copy(address* entry, const char *name) {
1212     __ align(CodeEntryAlignment);
1213     StubCodeMark mark(this, "StubRoutines", name);
1214     address start = __ pc();
1215 
1216     Label L_copy_8_bytes, L_copy_8_bytes_loop;
1217     const Register from       = rax;  // source array address
1218     const Register to         = rdx;  // destination array address
1219     const Register count      = rcx;  // elements count
1220     const Register to_from    = rdx;  // (to - from)
1221 
1222     __ enter(); // required for proper stackwalking of RuntimeStub frame
1223     __ movptr(from , Address(rsp, 8+0));       // from
1224     __ movptr(to   , Address(rsp, 8+4));       // to
1225     __ movl2ptr(count, Address(rsp, 8+8));     // count
1226 
1227     *entry = __ pc(); // Entry point from conjoint arraycopy stub.
1228     BLOCK_COMMENT("Entry:");
1229 
1230     {
1231       // UnsafeCopyMemory page error: continue after ucm
1232       UnsafeCopyMemoryMark ucmm(this, true, true);
1233       __ subptr(to, from); // to --> to_from
1234       if (VM_Version::supports_mmx()) {
1235         if (UseXMMForArrayCopy) {
1236           xmm_copy_forward(from, to_from, count);
1237         } else {
1238           mmx_copy_forward(from, to_from, count);
1239         }
1240       } else {
1241         __ jmpb(L_copy_8_bytes);
1242         __ align(OptoLoopAlignment);
1243       __ BIND(L_copy_8_bytes_loop);
1244         __ fild_d(Address(from, 0));
1245         __ fistp_d(Address(from, to_from, Address::times_1));
1246         __ addptr(from, 8);
1247       __ BIND(L_copy_8_bytes);
1248         __ decrement(count);
1249         __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1250       }
1251     }
1252     if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
1253       __ emms();
1254     }
1255     inc_copy_counter_np(T_LONG);
1256     __ leave(); // required for proper stackwalking of RuntimeStub frame
1257     __ vzeroupper();
1258     __ xorptr(rax, rax); // return 0
1259     __ ret(0);
1260     return start;
1261   }
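
All three code paths in this stub (XMM movq, MMX movq, and the x87
fild_d/fistp_d pair) move each element with a single 8-byte load and a single
8-byte store. On 32-bit x86 that is the point: a Java long must not tear, and
a pair of 4-byte moves could expose a half-written value to a concurrent
reader. A hedged sketch of the invariant, outside HotSpot:

    #include <cstdint>
    #include <cstring>

    // Illustrative only: copy one long with a single 8-byte-wide access on
    // each side, analogous to movq or fild_d+fistp_d. memcpy is used here for
    // exposition; the C++ standard does not guarantee it compiles to an
    // atomic 8-byte move the way the hand-written stub does.
    static inline void move_one_long(const int64_t* src, int64_t* dst) {
      uint64_t v;
      std::memcpy(&v, src, sizeof v);   // one 8-byte load  (movq xmm0, [src])
      std::memcpy(dst, &v, sizeof v);   // one 8-byte store (movq [dst], xmm0)
    }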
1262 
1263   address generate_conjoint_long_copy(address nooverlap_target,
1264                                       address* entry, const char *name) {
1265     __ align(CodeEntryAlignment);
1266     StubCodeMark mark(this, "StubRoutines", name);
1267     address start = __ pc();
1268 
1269     Label L_copy_8_bytes, L_copy_8_bytes_loop;
1270     const Register from       = rax;  // source array address
1271     const Register to         = rdx;  // destination array address
1272     const Register count      = rcx;  // elements count
1273     const Register end_from   = rax;  // source array end address
1274 
1275     __ enter(); // required for proper stackwalking of RuntimeStub frame
1276     __ movptr(from , Address(rsp, 8+0));       // from
1277     __ movptr(to   , Address(rsp, 8+4));       // to
1278     __ movl2ptr(count, Address(rsp, 8+8));     // count
1279 
1280     *entry = __ pc(); // Entry point from generic arraycopy stub.
1281     BLOCK_COMMENT("Entry:");
1282 
1283     // arrays overlap test
1284     __ cmpptr(to, from);
1285     RuntimeAddress nooverlap(nooverlap_target);
1286     __ jump_cc(Assembler::belowEqual, nooverlap);
1287     __ lea(end_from, Address(from, count, Address::times_8, 0));
1288     __ cmpptr(to, end_from);
1289     __ movptr(from, Address(rsp, 8));  // from
1290     __ jump_cc(Assembler::aboveEqual, nooverlap);
1291 
1292     {
1293       // UnsafeCopyMemory page error: continue after ucm
1294       UnsafeCopyMemoryMark ucmm(this, true, true);
1295 
1296       __ jmpb(L_copy_8_bytes);
1297 
1298       __ align(OptoLoopAlignment);
1299     __ BIND(L_copy_8_bytes_loop);
1300       if (VM_Version::supports_mmx()) {
1301         if (UseXMMForArrayCopy) {
1302           __ movq(xmm0, Address(from, count, Address::times_8));
1303           __ movq(Address(to, count, Address::times_8), xmm0);
1304         } else {
1305           __ movq(mmx0, Address(from, count, Address::times_8));
1306           __ movq(Address(to, count, Address::times_8), mmx0);
1307         }
1308       } else {
1309         __ fild_d(Address(from, count, Address::times_8));
1310         __ fistp_d(Address(to, count, Address::times_8));
1311       }
1312     __ BIND(L_copy_8_bytes);
1313       __ decrement(count);
1314       __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1315 
1316     }
1317     if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
1318       __ emms();
1319     }
1320     inc_copy_counter_np(T_LONG);
1321     __ leave(); // required for proper stackwalking of RuntimeStub frame
1322     __ xorptr(rax, rax); // return 0
1323     __ ret(0);
1324     return start;
1325   }
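
The loop shape shared by both long-copy stubs deserves a gloss: the code jumps
straight to the decrement-and-test at L_copy_8_bytes, so 'count' is decremented
before the body ever runs, and the body repeats while the result is still >= 0.
That executes the body exactly 'count' times, indexing elements count-1 down
to 0. The same shape in C++ (sketch only):

    #include <cstdint>

    // Mirror of the jmpb / decrement / jcc(greaterEqual) loop above.
    static void copy_longs_down(const int64_t* from, int64_t* to, int32_t count) {
      goto test;                     // __ jmpb(L_copy_8_bytes)
    loop:
      to[count] = from[count];       // body: one 8-byte element move
    test:
      if (--count >= 0) goto loop;   // __ decrement(count); __ jcc(greaterEqual, ...)
    }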
1326 
1327 
1328   // Helper for generating a dynamic type check.
1329   // The sub_klass must be one of {rbx, rdx, rsi}.
1330   // The temp is killed.
1331   void generate_type_check(Register sub_klass,
1332                            Address& super_check_offset_addr,
1333                            Address& super_klass_addr,
1334                            Register temp,
1335                            Label* L_success, Label* L_failure) {
1336     BLOCK_COMMENT("type_check:");


3955     // Safefetch stubs.
3956     generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
3957                                                    &StubRoutines::_safefetch32_fault_pc,
3958                                                    &StubRoutines::_safefetch32_continuation_pc);
3959     StubRoutines::_safefetchN_entry           = StubRoutines::_safefetch32_entry;
3960     StubRoutines::_safefetchN_fault_pc        = StubRoutines::_safefetch32_fault_pc;
3961     StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc;
3962   }
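
The SafeFetch stubs let VM code probe memory that may be unmapped: if the load
at _safefetch32_fault_pc faults, the signal handler resumes execution at
_safefetch32_continuation_pc with the caller-supplied error value instead of
crashing. On 32-bit x86 a pointer-sized probe is the same as a 32-bit probe,
hence the SafeFetchN aliases above. A usage sketch (the declaration is written
out here for illustration; the real one lives in HotSpot's stubRoutines
headers):

    // HotSpot-internal probe backed by the stub wired up above.
    int SafeFetch32(int* adr, int errValue);

    // Double-sentinel idiom: a mapped word cannot equal two different
    // sentinels at once, so this distinguishes a fault from a stored value.
    static bool is_readable_word(int* p) {
      const int s1 = 0x5A5A5A5A;
      const int s2 = ~s1;
      return SafeFetch32(p, s1) != s1 || SafeFetch32(p, s2) != s2;
    }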
3963 
3964 
3965  public:
3966   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3967     if (all) {
3968       generate_all();
3969     } else {
3970       generate_initial();
3971     }
3972   }
3973 }; // end class declaration
3974 
3975 #define UCM_TABLE_MAX_ENTRIES 8
3976 void StubGenerator_generate(CodeBuffer* code, bool all) {
3977   if (UnsafeCopyMemory::_table == NULL) {
3978     UnsafeCopyMemory::create_table(UCM_TABLE_MAX_ENTRIES);
3979   }
3980   StubGenerator g(code, all);
3981 }
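
The table created here is what makes the UnsafeCopyMemoryMark scopes in the
copy stubs above work: each mark records the [start, end) pc-range of a copy
loop together with an error-exit pc, and the fault handler consults the table
when an Unsafe.copyMemory touches an unmapped page. A sketch of the lookup
idea (names are illustrative, not the exact HotSpot declarations):

    #include <cstddef>

    struct UcmEntry { void* start_pc; void* end_pc; void* error_exit_pc; };

    static UcmEntry ucm_table[8];   // sized like UCM_TABLE_MAX_ENTRIES above
    static size_t   ucm_used = 0;

    // Called from the signal handler: if the faulting pc lies inside a marked
    // copy loop, execution resumes at that loop's error exit instead of
    // treating the fault as a fatal VM error.
    static void* ucm_error_continuation(void* fault_pc) {
      for (size_t i = 0; i < ucm_used; i++) {
        if (fault_pc >= ucm_table[i].start_pc && fault_pc < ucm_table[i].end_pc) {
          return ucm_table[i].error_exit_pc;
        }
      }
      return nullptr;  // not a UCM fault: normal crash handling applies
    }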

src/hotspot/cpu/x86/stubGenerator_x86_32.cpp (old version)

 872     if (entry != NULL) {
 873       *entry = __ pc(); // Entry point from conjoint arraycopy stub.
 874       BLOCK_COMMENT("Entry:");
 875     }
 876 
 877     if (t == T_OBJECT) {
 878       __ testl(count, count);
 879       __ jcc(Assembler::zero, L_0_count);
 880     }
 881 
 882     DecoratorSet decorators = IN_HEAP | IS_ARRAY | ARRAYCOPY_DISJOINT;
 883     if (dest_uninitialized) {
 884       decorators |= IS_DEST_UNINITIALIZED;
 885     }
 886     if (aligned) {
 887       decorators |= ARRAYCOPY_ALIGNED;
 888     }
 889 
 890     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
 891     bs->arraycopy_prologue(_masm, decorators, t, from, to, count);
 892 
 893     __ subptr(to, from); // to --> to_from
 894     __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
 895     __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
 896     if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
 897       // align source address at a 4-byte boundary
 898       if (t == T_BYTE) {
 899         // One-byte misalignment happens only for byte arrays
 900         __ testl(from, 1);
 901         __ jccb(Assembler::zero, L_skip_align1);
 902         __ movb(rax, Address(from, 0));
 903         __ movb(Address(from, to_from, Address::times_1, 0), rax);
 904         __ increment(from);
 905         __ decrement(count);
 906       __ BIND(L_skip_align1);
 907       }
 908       // Two-byte misalignment happens only for byte and short (char) arrays
 909       __ testl(from, 2);
 910       __ jccb(Assembler::zero, L_skip_align2);
 911       __ movw(rax, Address(from, 0));
 912       __ movw(Address(from, to_from, Address::times_1, 0), rax);
 913       __ addptr(from, 2);
 914       __ subl(count, 1<<(shift-1));
 915     __ BIND(L_skip_align2);
 916     }
 917     if (!VM_Version::supports_mmx()) {
 918       __ mov(rax, count);      // save 'count'
 919       __ shrl(count, shift); // dword count
 920       __ addptr(to_from, from);// restore 'to'
 921       __ rep_mov();
 922       __ subptr(to_from, from);// restore 'to_from'
 923       __ mov(count, rax);      // restore 'count'
 924       __ jmpb(L_copy_2_bytes); // all dwords were copied
 925     } else {
 926       if (!UseUnalignedLoadStores) {
 927         // align to 8 bytes; we know we are 4-byte aligned to start
 928         __ testptr(from, 4);
 929         __ jccb(Assembler::zero, L_copy_64_bytes);
 930         __ movl(rax, Address(from, 0));
 931         __ movl(Address(from, to_from, Address::times_1, 0), rax);
 932         __ addptr(from, 4);
 933         __ subl(count, 1<<shift);
 934       }
 935     __ BIND(L_copy_64_bytes);
 936       __ mov(rax, count);
 937       __ shrl(rax, shift+1);  // 8-byte chunk count
 938       //
 939       // Copy 8-byte chunks through XMM or MMX registers, 8 per iteration of the loop
 940       //
 941       if (UseXMMForArrayCopy) {
 942         xmm_copy_forward(from, to_from, rax);
 943       } else {
 944         mmx_copy_forward(from, to_from, rax);
 945       }
 946     }
 947     // copy trailing dword
 948   __ BIND(L_copy_4_bytes);
 949     __ testl(count, 1<<shift);
 950     __ jccb(Assembler::zero, L_copy_2_bytes);
 951     __ movl(rax, Address(from, 0));
 952     __ movl(Address(from, to_from, Address::times_1, 0), rax);
 953     if (t == T_BYTE || t == T_SHORT) {
 954       __ addptr(from, 4);
 955     __ BIND(L_copy_2_bytes);
 956       // copy trailing word
 957       __ testl(count, 1<<(shift-1));
 958       __ jccb(Assembler::zero, L_copy_byte);
 959       __ movw(rax, Address(from, 0));
 960       __ movw(Address(from, to_from, Address::times_1, 0), rax);
 961       if (t == T_BYTE) {
 962         __ addptr(from, 2);
 963       __ BIND(L_copy_byte);
 964         // copy trailing byte
 965         __ testl(count, 1);
 966         __ jccb(Assembler::zero, L_exit);
 967         __ movb(rax, Address(from, 0));
 968         __ movb(Address(from, to_from, Address::times_1, 0), rax);
 969       __ BIND(L_exit);
 970       } else {
 971       __ BIND(L_copy_byte);
 972       }
 973     } else {
 974     __ BIND(L_copy_2_bytes);
 975     }
 976 
 977     __ movl(count, Address(rsp, 12+12)); // reread 'count'
 978     bs->arraycopy_epilogue(_masm, decorators, t, from, to, count);
 979 
 980     if (t == T_OBJECT) {
 981     __ BIND(L_0_count);
 982     }
 983     inc_copy_counter_np(t);
 984     __ pop(rdi);
 985     __ pop(rsi);
 986     __ leave(); // required for proper stackwalking of RuntimeStub frame
 987     __ vzeroupper();
 988     __ xorptr(rax, rax); // return 0
 989     __ ret(0);
 990     return start;
 991   }
 992 
 993 
 994   address generate_fill(BasicType t, bool aligned, const char *name) {
 995     __ align(CodeEntryAlignment);
 996     StubCodeMark mark(this, "StubRoutines", name);


1062     __ jump_cc(Assembler::belowEqual, nooverlap);
1063     __ cmpptr(dst, end);
1064     __ jump_cc(Assembler::aboveEqual, nooverlap);
1065 
1066     if (t == T_OBJECT) {
1067       __ testl(count, count);
1068       __ jcc(Assembler::zero, L_0_count);
1069     }
1070 
1071     DecoratorSet decorators = IN_HEAP | IS_ARRAY;
1072     if (dest_uninitialized) {
1073       decorators |= IS_DEST_UNINITIALIZED;
1074     }
1075     if (aligned) {
1076       decorators |= ARRAYCOPY_ALIGNED;
1077     }
1078 
1079     BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
1080     bs->arraycopy_prologue(_masm, decorators, t, from, to, count);
1081 
1082     // copy from high to low
1083     __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
1084     __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
1085     if (t == T_BYTE || t == T_SHORT) {
1086       // Align the end of the destination array at a 4-byte boundary
1087       __ lea(end, Address(dst, count, sf, 0));
1088       if (t == T_BYTE) {
1089         // One-byte misalignment happens only for byte arrays
1090         __ testl(end, 1);
1091         __ jccb(Assembler::zero, L_skip_align1);
1092         __ decrement(count);
1093         __ movb(rdx, Address(from, count, sf, 0));
1094         __ movb(Address(to, count, sf, 0), rdx);
1095       __ BIND(L_skip_align1);
1096       }
1097       // Two-byte misalignment happens only for byte and short (char) arrays
1098       __ testl(end, 2);
1099       __ jccb(Assembler::zero, L_skip_align2);
1100       __ subptr(count, 1<<(shift-1));
1101       __ movw(rdx, Address(from, count, sf, 0));
1102       __ movw(Address(to, count, sf, 0), rdx);
1103     __ BIND(L_skip_align2);
1104       __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
1105       __ jcc(Assembler::below, L_copy_4_bytes);
1106     }
1107 
1108     if (!VM_Version::supports_mmx()) {
1109       __ std();
1110       __ mov(rax, count); // Save 'count'
1111       __ mov(rdx, to);    // Save 'to'
1112       __ lea(rsi, Address(from, count, sf, -4));
1113       __ lea(rdi, Address(to  , count, sf, -4));
1114       __ shrptr(count, shift); // dword count
1115       __ rep_mov();
1116       __ cld();
1117       __ mov(count, rax); // restore 'count'
1118       __ andl(count, (1<<shift)-1);      // mask the number of remaining elements
1119       __ movptr(from, Address(rsp, 12+4)); // reread 'from'
1120       __ mov(to, rdx);   // restore 'to'
1121       __ jmpb(L_copy_2_bytes); // all dwords were copied
1122     } else {
1123       // Align the end of the array to 8 bytes; it is already 4-byte aligned.
1124       __ testptr(end, 4);
1125       __ jccb(Assembler::zero, L_copy_8_bytes);
1126       __ subl(count, 1<<shift);
1127       __ movl(rdx, Address(from, count, sf, 0));
1128       __ movl(Address(to, count, sf, 0), rdx);
1129       __ jmpb(L_copy_8_bytes);
1130 
1131       __ align(OptoLoopAlignment);
1132       // Move 8 bytes
1133     __ BIND(L_copy_8_bytes_loop);
1134       if (UseXMMForArrayCopy) {
1135         __ movq(xmm0, Address(from, count, sf, 0));
1136         __ movq(Address(to, count, sf, 0), xmm0);
1137       } else {
1138         __ movq(mmx0, Address(from, count, sf, 0));
1139         __ movq(Address(to, count, sf, 0), mmx0);
1140       }
1141     __ BIND(L_copy_8_bytes);
1142       __ subl(count, 2<<shift);
1143       __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1144       __ addl(count, 2<<shift);
1145       if (!UseXMMForArrayCopy) {
1146         __ emms();
1147       }
1148     }
1149   __ BIND(L_copy_4_bytes);
1150     // copy prefix dword
1151     __ testl(count, 1<<shift);
1152     __ jccb(Assembler::zero, L_copy_2_bytes);
1153     __ movl(rdx, Address(from, count, sf, -4));
1154     __ movl(Address(to, count, sf, -4), rdx);
1155 
1156     if (t == T_BYTE || t == T_SHORT) {
1157         __ subl(count, (1<<shift));
1158       __ BIND(L_copy_2_bytes);
1159         // copy prefix word
1160         __ testl(count, 1<<(shift-1));
1161         __ jccb(Assembler::zero, L_copy_byte);
1162         __ movw(rdx, Address(from, count, sf, -2));
1163         __ movw(Address(to, count, sf, -2), rdx);
1164         if (t == T_BYTE) {
1165           __ subl(count, 1<<(shift-1));
1166         __ BIND(L_copy_byte);
1167           // copy prefix byte
1168           __ testl(count, 1);
1169           __ jccb(Assembler::zero, L_exit);
1170           __ movb(rdx, Address(from, 0));
1171           __ movb(Address(to, 0), rdx);
1172         __ BIND(L_exit);
1173         } else {
1174         __ BIND(L_copy_byte);
1175         }
1176     } else {
1177     __ BIND(L_copy_2_bytes);
1178     }
1179 
1180     __ movl2ptr(count, Address(rsp, 12+12)); // reread count
1181     bs->arraycopy_epilogue(_masm, decorators, t, from, to, count);
1182 
1183     if (t == T_OBJECT) {
1184     __ BIND(L_0_count);
1185     }
1186     inc_copy_counter_np(t);
1187     __ pop(rdi);
1188     __ pop(rsi);
1189     __ leave(); // required for proper stackwalking of RuntimeStub frame
1190     __ xorptr(rax, rax); // return 0
1191     __ ret(0);
1192     return start;
1193   }
1194 
1195 
1196   address generate_disjoint_long_copy(address* entry, const char *name) {
1197     __ align(CodeEntryAlignment);
1198     StubCodeMark mark(this, "StubRoutines", name);
1199     address start = __ pc();
1200 
1201     Label L_copy_8_bytes, L_copy_8_bytes_loop;
1202     const Register from       = rax;  // source array address
1203     const Register to         = rdx;  // destination array address
1204     const Register count      = rcx;  // elements count
1205     const Register to_from    = rdx;  // (to - from)
1206 
1207     __ enter(); // required for proper stackwalking of RuntimeStub frame
1208     __ movptr(from , Address(rsp, 8+0));       // from
1209     __ movptr(to   , Address(rsp, 8+4));       // to
1210     __ movl2ptr(count, Address(rsp, 8+8));     // count
1211 
1212     *entry = __ pc(); // Entry point from conjoint arraycopy stub.
1213     BLOCK_COMMENT("Entry:");
1214 
1215     __ subptr(to, from); // to --> to_from
1216     if (VM_Version::supports_mmx()) {
1217       if (UseXMMForArrayCopy) {
1218         xmm_copy_forward(from, to_from, count);
1219       } else {
1220         mmx_copy_forward(from, to_from, count);
1221       }
1222     } else {
1223       __ jmpb(L_copy_8_bytes);
1224       __ align(OptoLoopAlignment);
1225     __ BIND(L_copy_8_bytes_loop);
1226       __ fild_d(Address(from, 0));
1227       __ fistp_d(Address(from, to_from, Address::times_1));
1228       __ addptr(from, 8);
1229     __ BIND(L_copy_8_bytes);
1230       __ decrement(count);
1231       __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1232     }
1233     inc_copy_counter_np(T_LONG);
1234     __ leave(); // required for proper stackwalking of RuntimeStub frame
1235     __ vzeroupper();
1236     __ xorptr(rax, rax); // return 0
1237     __ ret(0);
1238     return start;
1239   }
1240 
1241   address generate_conjoint_long_copy(address nooverlap_target,
1242                                       address* entry, const char *name) {
1243     __ align(CodeEntryAlignment);
1244     StubCodeMark mark(this, "StubRoutines", name);
1245     address start = __ pc();
1246 
1247     Label L_copy_8_bytes, L_copy_8_bytes_loop;
1248     const Register from       = rax;  // source array address
1249     const Register to         = rdx;  // destination array address
1250     const Register count      = rcx;  // elements count
1251     const Register end_from   = rax;  // source array end address
1252 
1253     __ enter(); // required for proper stackwalking of RuntimeStub frame
1254     __ movptr(from , Address(rsp, 8+0));       // from
1255     __ movptr(to   , Address(rsp, 8+4));       // to
1256     __ movl2ptr(count, Address(rsp, 8+8));     // count
1257 
1258     *entry = __ pc(); // Entry point from generic arraycopy stub.
1259     BLOCK_COMMENT("Entry:");
1260 
1261     // arrays overlap test
1262     __ cmpptr(to, from);
1263     RuntimeAddress nooverlap(nooverlap_target);
1264     __ jump_cc(Assembler::belowEqual, nooverlap);
1265     __ lea(end_from, Address(from, count, Address::times_8, 0));
1266     __ cmpptr(to, end_from);
1267     __ movptr(from, Address(rsp, 8));  // from
1268     __ jump_cc(Assembler::aboveEqual, nooverlap);
1269 
1270     __ jmpb(L_copy_8_bytes);
1271 
1272     __ align(OptoLoopAlignment);
1273   __ BIND(L_copy_8_bytes_loop);
1274     if (VM_Version::supports_mmx()) {
1275       if (UseXMMForArrayCopy) {
1276         __ movq(xmm0, Address(from, count, Address::times_8));
1277         __ movq(Address(to, count, Address::times_8), xmm0);
1278       } else {
1279         __ movq(mmx0, Address(from, count, Address::times_8));
1280         __ movq(Address(to, count, Address::times_8), mmx0);
1281       }
1282     } else {
1283       __ fild_d(Address(from, count, Address::times_8));
1284       __ fistp_d(Address(to, count, Address::times_8));
1285     }
1286   __ BIND(L_copy_8_bytes);
1287     __ decrement(count);
1288     __ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
1289 
1290     if (VM_Version::supports_mmx() && !UseXMMForArrayCopy) {
1291       __ emms();
1292     }
1293     inc_copy_counter_np(T_LONG);
1294     __ leave(); // required for proper stackwalking of RuntimeStub frame
1295     __ xorptr(rax, rax); // return 0
1296     __ ret(0);
1297     return start;
1298   }
1299 
1300 
1301   // Helper for generating a dynamic type check.
1302   // The sub_klass must be one of {rbx, rdx, rsi}.
1303   // The temp is killed.
1304   void generate_type_check(Register sub_klass,
1305                            Address& super_check_offset_addr,
1306                            Address& super_klass_addr,
1307                            Register temp,
1308                            Label* L_success, Label* L_failure) {
1309     BLOCK_COMMENT("type_check:");


3928     // Safefetch stubs.
3929     generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
3930                                                    &StubRoutines::_safefetch32_fault_pc,
3931                                                    &StubRoutines::_safefetch32_continuation_pc);
3932     StubRoutines::_safefetchN_entry           = StubRoutines::_safefetch32_entry;
3933     StubRoutines::_safefetchN_fault_pc        = StubRoutines::_safefetch32_fault_pc;
3934     StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc;
3935   }
3936 
3937 
3938  public:
3939   StubGenerator(CodeBuffer* code, bool all) : StubCodeGenerator(code) {
3940     if (all) {
3941       generate_all();
3942     } else {
3943       generate_initial();
3944     }
3945   }
3946 }; // end class declaration
3947 
3948 
3949 void StubGenerator_generate(CodeBuffer* code, bool all) {
3950   StubGenerator g(code, all);
3951 }