#else
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
    // push and pop the part at src + wordSize, adding wordSize for the previous push
    __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 2 * wordSize));
    __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
#endif // _LP64

  } else {
    ShouldNotReachHere();
  }
}

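// mem2reg: load a value of the given BasicType from the address operand src
// into the register operand dest. In outline (summarizing the code below):
// narrow integral types may pre-clear the destination to avoid partial
// register stalls, oops and narrow klass words may be loaded in compressed
// form and decoded after the load, and patch_code/info hook up deferred
// patching via patching_epilog for offsets not yet resolved at compile time.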
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
  assert(src->is_address(), "should not call otherwise");
  assert(dest->is_register(), "should not call otherwise");

  LIR_Address* addr = src->as_address_ptr();
  Address from_addr = as_Address(addr);
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

  if (addr->base()->type() == T_OBJECT) {
    __ verify_oop(addr->base()->as_pointer_register());
  }

  switch (type) {
    case T_BOOLEAN: // fall through
    case T_BYTE:    // fall through
    case T_CHAR:    // fall through
    case T_SHORT:
      if (!VM_Version::is_P6() && !from_addr.uses(dest->as_register())) {
        // On pre-P6 processors we may get partial register stalls, so blow
        // away the value of to_rinfo before loading a partial word into it.
        // Do it here so that it precedes the potential patch point below.
        __ xorptr(dest->as_register(), dest->as_register());
      }
      break;
    default:
      break;
    // ...
        assert(dest->is_double_fpu(), "must be");
        assert(dest->fpu_regnrLo() == 0, "dest must be TOS");
        __ fld_d(from_addr);
#else
        ShouldNotReachHere();
#endif // !LP64
      }
      break;
    }

    case T_OBJECT:  // fall through
    case T_ARRAY:   // fall through
      if (UseCompressedOops && !wide) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;

    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ movl(dest->as_register(), from_addr);
      } else {
        __ movptr(dest->as_register(), from_addr);
      }
      break;
    case T_INT:
      __ movl(dest->as_register(), from_addr);
      break;

    case T_LONG: {
      Register to_lo = dest->as_register_lo();
      Register to_hi = dest->as_register_hi();
#ifdef _LP64
      __ movptr(to_lo, as_Address_lo(addr));
#else
      Register base = addr->base()->as_register();
      Register index = noreg;
      if (addr->index()->is_register()) {
        index = addr->index()->as_register();
      }
      if ((base == to_lo && index == to_hi) ||
          (base == to_hi && index == to_lo)) {
        // addresses with 2 registers are only formed as a result of
        // array access so this code will never have to deal with
        // ...

    default:
      ShouldNotReachHere();
  }

  if (patch != NULL) {
    patching_epilog(patch, patch_code, addr->base()->as_register(), info);
  }

  if (is_reference_type(type)) {
#ifdef _LP64
    if (UseCompressedOops && !wide) {
      __ decode_heap_oop(dest->as_register());
    }
#endif

    // Load barrier has not yet been applied, so ZGC can't verify the oop here
    if (!UseZGC) {
      __ verify_oop(dest->as_register());
    }
  } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ decode_klass_not_null(dest->as_register(), tmp_load_klass);
    }
#endif
  }
}


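// array_element_size: map an element type's size in bytes onto the x86 SIB
// scale factor, so an element address can be formed in a single addressing
// mode as base + index * scale + disp. For example, T_INT elements are
// 4 bytes wide and use Address::times_4.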
NEEDS_CLEANUP; // This could be static?
Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
  int elem_size = type2aelembytes(type);
  switch (elem_size) {
    case 1: return Address::times_1;
    case 2: return Address::times_2;
    case 4: return Address::times_4;
    case 8: return Address::times_8;
  }
  ShouldNotReachHere();
  return Address::no_scale;
}


void LIR_Assembler::emit_op3(LIR_Op3* op) {
  switch (op->code()) {
  // ...
      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::header_size(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

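// type_profile_helper: walk the receiver rows of a ReceiverTypeData cell;
// if recv matches a recorded receiver klass, bump that row's counter and
// jump to update_done. The continuation (elided below) claims an empty row
// for a receiver type that has not been seen before.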
void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }

// ...

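// store_parameter: spill an outgoing argument for a stub or runtime call
// into the reserved argument area at the bottom of the current frame, at
// the given word offset from rsp. This Metadata* overload has register and
// constant siblings elsewhere in this file (emit_arraycopy uses them below).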
void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
}


// This code replaces a call to arraycopy; no exceptions may be thrown in
// this code, they must be thrown in the System.arraycopy activation frame
// instead. We could save some checks if this were not the case.
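// In outline: when the expected element type is unknown we spill the
// arguments and take the generic arraycopy path; otherwise we emit inline
// null, position, length and (when required) element-type checks, each
// branching to the slow-path stub, before handing the raw addresses to a
// specialized copy routine in the code elided below.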
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL) {
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // These are proper for the calling convention
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);

  // ...
  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
  }
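  // Note: Assembler::above is an unsigned comparison, so dst_pos + length is
  // treated as a full 32-bit value; the sum of two non-negative ints that
  // overflows int range still compares greater than the array length and
  // correctly takes the slow path.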

#ifdef _LP64
  __ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
  __ movl2ptr(dst_pos, dst_pos); // higher 32 bits must be zero
#endif
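  // src_pos and dst_pos arrive as 32-bit ints but feed 64-bit address
  // arithmetic below, so their upper halves must be well-defined; both are
  // known non-negative here, so the sign-extending movl2ptr zeroes them.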

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      if (UseCompressedClassPointers) {
        __ movl(tmp, src_klass_addr);
        __ cmpl(tmp, dst_klass_addr);
      } else {
        __ movptr(tmp, src_klass_addr);
        __ cmpptr(tmp, dst_klass_addr);
      }
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a sub class of dst then we can
      // safely do the copy.
      Label cont, slow;

      __ push(src);
      __ push(dst);

      __ load_klass(src, src, tmp_load_klass);
      __ load_klass(dst, dst, tmp_load_klass);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

      __ push(src);
      __ push(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(dst);
      __ pop(src);
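      // The first push pair above preserves the original array oops across
      // the klass loads (load_klass overwrites src and dst); the second pair
      // passes the two Klass* values on the stack to the slow_subtype_check
      // stub, whose result is consumed by the code elided below.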

      // ...
      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp, rscratch1);
    }
#endif

    if (basic_type != T_OBJECT) {

      if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
      else                            __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::notEqual, halt);
      if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr);
      else                            __ cmpptr(tmp, src_klass_addr);
      __ jcc(Assembler::equal, known_ok);
    } else {
      if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
      else                            __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
  }
#endif

#ifdef _LP64
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
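  // Marshal the raw source element address (src + header offset + src_pos
  // scaled by element size) into the first C argument register; presumably
  // the destination address and length follow in the remaining argument
  // registers in the elided continuation before the copy routine is called.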
// ...
  assert(op->crc()->is_single_cpu(), "crc must be register");
  assert(op->val()->is_single_cpu(), "byte value must be register");
  assert(op->result_opr()->is_single_cpu(), "result must be register");
  Register crc = op->crc()->as_register();
  Register val = op->val()->as_register();
  Register res = op->result_opr()->as_register();

  assert_different_registers(val, crc, res);

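  // java.util.zip.CRC32 keeps the running checksum bit-inverted between
  // updates (the usual CRC-32 pre-/post-conditioning), so the value is
  // inverted on entry, run through the table-driven byte update, and
  // inverted back before being returned.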
  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
  __ notl(crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ notl(crc); // ~crc
  __ mov(res, crc);
}

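// emit_lock: emit the inline fast path for monitorenter/monitorexit. With
// fast locking disabled everything is routed to the slow-path stub; with it
// enabled, lock_object/unlock_object attempt the stack-lock fast path and
// branch to the stub entry only when the fast path cannot succeed.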
void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (!UseFastLocking) {
    __ jmp(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    Register scratch = noreg;
    if (UseBiasedLocking) {
      scratch = op->scratch_opr()->as_register();
    }
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}


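// emit_profile_call: bump the invocation counter for this call site in the
// method's MDO, and for virtual/interface call sites additionally record
// the observed receiver class in the receiver rows (see type_profile_helper
// above).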
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  __ mov_metadata(mdo, md->constant_encoding());
  Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  // Perform additional virtual call profiling for invokevirtual and
  // invokeinterface bytecodes
  if (op->should_profile_receiver_type()) {
    assert(op->recv()->is_single_cpu(), "recv must be allocated");
// ...

  bool do_null = !not_null;
  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;

  assert(do_null || do_update, "why are we here?");
  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");

  __ verify_oop(obj);

  if (tmp != obj) {
    __ mov(tmp, obj);
  }
  if (do_null) {
    __ testptr(tmp, tmp);
    __ jccb(Assembler::notZero, update);
    if (!TypeEntries::was_null_seen(current_klass)) {
      __ orptr(mdo_addr, TypeEntries::null_seen);
    }
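    // null_seen is a flag bit in the MDO cell: it is OR-ed in once a null
    // receiver is observed, and no klass is recorded for null, so the type
    // update below is skipped.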
    if (do_update) {
#ifndef ASSERT
      __ jmpb(next);
#else
      __ jmp(next);
#endif
    }
#ifdef ASSERT
  } else {
    __ testptr(tmp, tmp);
    __ jcc(Assembler::notZero, update);
    __ stop("unexpected null obj");
#endif
  }

  __ bind(update);

  if (do_update) {
#ifdef ASSERT
    if (exact_klass != NULL) {
      Label ok;
      __ load_klass(tmp, tmp, tmp_load_klass);
      __ push(tmp);
      __ mov_metadata(tmp, exact_klass->constant_encoding());
      __ cmpptr(tmp, Address(rsp, 0));
      __ jcc(Assembler::equal, ok);
      __ stop("exact klass and actual klass differ");
      __ bind(ok);