1626 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1627 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1628 __ jmp(*op->stub()->entry());
1629 } else {
1630 Register tmp1 = op->tmp1()->as_register();
1631 Register tmp2 = op->tmp2()->as_register();
1632 Register tmp3 = op->tmp3()->as_register();
1633 if (len == tmp1) {
1634 tmp1 = tmp3;
1635 } else if (len == tmp2) {
1636 tmp2 = tmp3;
1637 } else if (len == tmp3) {
1638 // everything is ok
1639 } else {
1640 __ mov(tmp3, len);
1641 }
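      // The shuffling above keeps len out of the scratch registers that
      // allocate_array() clobbers: if len aliases tmp1 or tmp2, the clobbered temp is
      // retargeted to tmp3; otherwise len ends up in tmp3 (copied there if needed),
      // presumably so the slow-path stub also sees the length where it expects it.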
1642 __ allocate_array(op->obj()->as_register(),
1643 len,
1644 tmp1,
1645 tmp2,
1646 arrayOopDesc::header_size(op->type()),
1647 array_element_size(op->type()),
1648 op->klass()->as_register(),
1649 *op->stub()->entry());
1650 }
1651 __ bind(*op->stub()->continuation());
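  // Both paths rejoin at this continuation: the inline fast path falls through to it,
  // and the slow-path stub jumps back here once its runtime call has allocated the array.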
1652 }
1653
1654 void LIR_Assembler::type_profile_helper(Register mdo,
1655 ciMethodData *md, ciProfileData *data,
1656 Register recv, Label* update_done) {
1657 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1658 Label next_test;
1659 // See if the receiver is receiver[n].
1660 __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1661 __ jccb(Assembler::notEqual, next_test);
1662 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
1663 __ addptr(data_addr, DataLayout::counter_increment);
1664 __ jmp(*update_done);
1665 __ bind(next_test);
1666 }
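  // Each profiled row caches one receiver klass and a hit counter: a receiver that
  // matches row i bumps that row's counter and jumps straight to update_done,
  // otherwise the next row is tried.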
3051 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
3052 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3053 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3054 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3055 __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
3056 }
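// Note: store_parameter() spills an outgoing argument into the reserved argument
// area at the bottom of the current frame (addressed off rsp), which is where the
// C1 slow-path runtime stubs expect to pick their parameters up.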
3057
3058
3059 // This code replaces a call to arraycopy; no exception may be
3060 // thrown in this code. Any exception must be thrown in the System.arraycopy
3061 // activation frame; we could save some checks if that were not the case.
3062 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3063 ciArrayKlass* default_type = op->expected_type();
3064 Register src = op->src()->as_register();
3065 Register dst = op->dst()->as_register();
3066 Register src_pos = op->src_pos()->as_register();
3067 Register dst_pos = op->dst_pos()->as_register();
3068 Register length = op->length()->as_register();
3069 Register tmp = op->tmp()->as_register();
3070 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3071
3072 CodeStub* stub = op->stub();
3073 int flags = op->flags();
3074 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3075 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3076
3077 // if we don't know anything, just go through the generic arraycopy
3078 if (default_type == NULL) {
3079 // save outgoing arguments on stack in case call to System.arraycopy is needed
3080     // HACK ALERT. This code used to push the parameters in a hardwired fashion
3081     // for the interpreter calling convention. Now it has to follow the new-style convention.
3082     // For the moment, until C1 gets the new register allocator, we just force all the
3083     // args into the right places (except the register args) and then, on the back side,
3084     // reload the register args properly if we take the slow path. Yuck
3085
3086 // These are proper for the calling convention
3087 store_parameter(length, 2);
3088 store_parameter(dst_pos, 1);
3089 store_parameter(dst, 0);
3090
3242 if (flags & LIR_OpArrayCopy::dst_range_check) {
3243 __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
3244 __ cmpl(tmp, dst_length_addr);
3245 __ jcc(Assembler::above, *stub->entry());
3246 }
3247
3248 if (flags & LIR_OpArrayCopy::length_positive_check) {
3249 __ testl(length, length);
3250 __ jcc(Assembler::less, *stub->entry());
3251 }
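  // A negative length is not rejected inline; control goes to the slow-path stub so
  // that, per the comment at the top of this function, the exception is raised in the
  // System.arraycopy activation frame.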
3252
3253 #ifdef _LP64
3254   __ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
3255   __ movl2ptr(dst_pos, dst_pos); // higher 32 bits must be zero
3256 #endif
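  // On 64-bit the two 32-bit position values are explicitly zero-extended above so
  // they can be used safely as index components in the 64-bit address arithmetic below.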
3257
3258 if (flags & LIR_OpArrayCopy::type_check) {
3259     // We don't know whether the array types are compatible
3260 if (basic_type != T_OBJECT) {
3261 // Simple test for basic type arrays
3262 if (UseCompressedClassPointers) {
3263 __ movl(tmp, src_klass_addr);
3264 __ cmpl(tmp, dst_klass_addr);
3265 } else {
3266 __ movptr(tmp, src_klass_addr);
3267 __ cmpptr(tmp, dst_klass_addr);
3268 }
3269 __ jcc(Assembler::notEqual, *stub->entry());
3270 } else {
3271       // For object arrays, if src is a subclass of dst then we can
3272       // safely do the copy.
3273 Label cont, slow;
3274
3275 __ push(src);
3276 __ push(dst);
3277
3278 __ load_klass(src, src, tmp_load_klass);
3279 __ load_klass(dst, dst, tmp_load_klass);
3280
3281 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
3282
3283 __ push(src);
3284 __ push(dst);
3285 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
3286 __ pop(dst);
3287 __ pop(src);
3288
3408 __ pop(src);
3409 }
3410 }
3411
3412 #ifdef ASSERT
3413 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
3414     // Sanity check the known type against the incoming classes. For the
3415     // primitive case the types must match exactly: src.klass and
3416     // dst.klass must each be exactly the default type. For the
3417     // object array case, if no type check is needed, then either the
3418     // dst type is exactly the expected type and the src type is a
3419     // subtype which we can't check, or src is the same array as dst
3420     // but not necessarily exactly of type default_type.
3421 Label known_ok, halt;
3422 __ mov_metadata(tmp, default_type->constant_encoding());
3423 #ifdef _LP64
3424 if (UseCompressedClassPointers) {
3425 __ encode_klass_not_null(tmp, rscratch1);
3426 }
3427 #endif
3428
3429 if (basic_type != T_OBJECT) {
3430
3431 if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
3432 else __ cmpptr(tmp, dst_klass_addr);
3433 __ jcc(Assembler::notEqual, halt);
3434 if (UseCompressedClassPointers) __ cmpl(tmp, src_klass_addr);
3435 else __ cmpptr(tmp, src_klass_addr);
3436 __ jcc(Assembler::equal, known_ok);
3437 } else {
3438 if (UseCompressedClassPointers) __ cmpl(tmp, dst_klass_addr);
3439 else __ cmpptr(tmp, dst_klass_addr);
3440 __ jcc(Assembler::equal, known_ok);
3441 __ cmpptr(src, dst);
3442 __ jcc(Assembler::equal, known_ok);
3443 }
3444 __ bind(halt);
3445 __ stop("incorrect type information in arraycopy");
3446 __ bind(known_ok);
3447 }
3448 #endif
3449
3450 #ifndef PRODUCT
3451 if (PrintC1Statistics) {
3452 __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
3453 }
3454 #endif
3455
3456 #ifdef _LP64
3457 assert_different_registers(c_rarg0, dst, dst_pos, length);
3458 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3459 assert_different_registers(c_rarg1, length);
3481 assert(op->crc()->is_single_cpu(), "crc must be register");
3482 assert(op->val()->is_single_cpu(), "byte value must be register");
3483 assert(op->result_opr()->is_single_cpu(), "result must be register");
3484 Register crc = op->crc()->as_register();
3485 Register val = op->val()->as_register();
3486 Register res = op->result_opr()->as_register();
3487
3488 assert_different_registers(val, crc, res);
3489
3490 __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
3491 __ notl(crc); // ~crc
3492 __ update_byte_crc32(crc, val, res);
3493 __ notl(crc); // ~crc
3494 __ mov(res, crc);
3495 }
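// Note on the byte-wise CRC32 update emitted above: res is first pointed at the CRC
// lookup table, the running crc is complemented, one input byte is folded in through
// the table (conceptually crc = (crc >> 8) ^ table[(crc ^ byte) & 0xFF]), the crc is
// complemented again, and the final value is moved into the result register.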
3496
3497 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
3498 Register obj = op->obj_opr()->as_register(); // may not be an oop
3499 Register hdr = op->hdr_opr()->as_register();
3500 Register lock = op->lock_opr()->as_register();
3501 if (!UseFastLocking) {
3502 __ jmp(*op->stub()->entry());
3503 } else if (op->code() == lir_lock) {
3504 Register scratch = noreg;
3505 if (UseBiasedLocking) {
3506 scratch = op->scratch_opr()->as_register();
3507 }
3508 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3509 // add debug info for NullPointerException only if one is possible
3510 int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
3511 if (op->info() != NULL) {
3512 add_debug_info_for_null_check(null_check_offset, op->info());
3513 }
3514 // done
3515 } else if (op->code() == lir_unlock) {
3516 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3517 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
3518 } else {
3519 Unimplemented();
3520 }
3521 __ bind(*op->stub()->continuation());
3522 }
3523
3524 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
3525 Register obj = op->obj()->as_pointer_register();
3526 Register result = op->result_opr()->as_pointer_register();
3527
3528 CodeEmitInfo* info = op->info();
3529 if (info != NULL) {
3530 add_debug_info_for_null_check_here(info);
3531 }
3532
3533 #ifdef _LP64
3534 if (UseCompressedClassPointers) {
3535 __ movl(result, Address(obj, oopDesc::klass_offset_in_bytes()));
3536 __ decode_klass_not_null(result, rscratch1);
3537 } else
3538 #endif
3539 __ movptr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
3540 }
3541
3542 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
3543 ciMethod* method = op->profiled_method();
3544 int bci = op->profiled_bci();
3545 ciMethod* callee = op->profiled_callee();
3546 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3547
3548 // Update counter for all call types
3549 ciMethodData* md = method->method_data_or_null();
3550 assert(md != NULL, "Sanity");
3551 ciProfileData* data = md->bci_to_data(bci);
3552 assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
3553 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
3554 Register mdo = op->mdo()->as_register();
3645 #else
3646 assert_different_registers(obj, tmp, mdo_addr.base(), mdo_addr.index());
3647 #endif
3648 }
3649 #endif
3650 if (do_null) {
3651 __ testptr(obj, obj);
3652 __ jccb(Assembler::notZero, update);
3653 if (!TypeEntries::was_null_seen(current_klass)) {
3654 __ testptr(mdo_addr, TypeEntries::null_seen);
3655 #ifndef ASSERT
3656 __ jccb(Assembler::notZero, next); // already set
3657 #else
3658 __ jcc(Assembler::notZero, next); // already set
3659 #endif
3660 // atomic update to prevent overwriting Klass* with 0
3661 __ lock();
3662 __ orptr(mdo_addr, TypeEntries::null_seen);
3663 }
3664 if (do_update) {
3665 #ifndef ASSERT
3666 __ jmpb(next);
3667 }
3668 #else
3669 __ jmp(next);
3670 }
3671 } else {
3672 __ testptr(obj, obj);
3673 __ jcc(Assembler::notZero, update);
3674     __ stop("unexpected null obj");
3675 #endif
3676 }
3677
3678 __ bind(update);
3679
3680 if (do_update) {
3681 #ifdef ASSERT
3682 if (exact_klass != NULL) {
3683 Label ok;
3684 __ load_klass(tmp, obj, tmp_load_klass);
3685 __ push(tmp);
3686 __ mov_metadata(tmp, exact_klass->constant_encoding());
3687 __ cmpptr(tmp, Address(rsp, 0));
3688 __ jcc(Assembler::equal, ok);
3689 __ stop("exact klass and actual klass differ");
3690 __ bind(ok);
|
1626 (!UseFastNewObjectArray && is_reference_type(op->type())) ||
1627 (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
1628 __ jmp(*op->stub()->entry());
1629 } else {
1630 Register tmp1 = op->tmp1()->as_register();
1631 Register tmp2 = op->tmp2()->as_register();
1632 Register tmp3 = op->tmp3()->as_register();
1633 if (len == tmp1) {
1634 tmp1 = tmp3;
1635 } else if (len == tmp2) {
1636 tmp2 = tmp3;
1637 } else if (len == tmp3) {
1638 // everything is ok
1639 } else {
1640 __ mov(tmp3, len);
1641 }
1642 __ allocate_array(op->obj()->as_register(),
1643 len,
1644 tmp1,
1645 tmp2,
1646 arrayOopDesc::base_offset_in_bytes(op->type()),
1647 array_element_size(op->type()),
1648 op->klass()->as_register(),
1649 *op->stub()->entry());
1650 }
1651 __ bind(*op->stub()->continuation());
1652 }
1653
1654 void LIR_Assembler::type_profile_helper(Register mdo,
1655 ciMethodData *md, ciProfileData *data,
1656 Register recv, Label* update_done) {
1657 for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
1658 Label next_test;
1659 // See if the receiver is receiver[n].
1660 __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
1661 __ jccb(Assembler::notEqual, next_test);
1662 Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
1663 __ addptr(data_addr, DataLayout::counter_increment);
1664 __ jmp(*update_done);
1665 __ bind(next_test);
1666 }
3051 void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
3052 assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
3053 int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
3054 assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
3055 __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m);
3056 }
3057
3058
3059 // This code replaces a call to arraycopy; no exception may be
3060 // thrown in this code. Any exception must be thrown in the System.arraycopy
3061 // activation frame; we could save some checks if that were not the case.
3062 void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
3063 ciArrayKlass* default_type = op->expected_type();
3064 Register src = op->src()->as_register();
3065 Register dst = op->dst()->as_register();
3066 Register src_pos = op->src_pos()->as_register();
3067 Register dst_pos = op->dst_pos()->as_register();
3068 Register length = op->length()->as_register();
3069 Register tmp = op->tmp()->as_register();
3070 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3071 Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg;
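  // When compact object headers are enabled the klass is carried in the mark word,
  // so the cmp_klass() calls below need this extra scratch register to extract it;
  // otherwise no second temp is required.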
3072
3073 CodeStub* stub = op->stub();
3074 int flags = op->flags();
3075 BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
3076 if (is_reference_type(basic_type)) basic_type = T_OBJECT;
3077
3078 // if we don't know anything, just go through the generic arraycopy
3079 if (default_type == NULL) {
3080 // save outgoing arguments on stack in case call to System.arraycopy is needed
3081     // HACK ALERT. This code used to push the parameters in a hardwired fashion
3082     // for the interpreter calling convention. Now it has to follow the new-style convention.
3083     // For the moment, until C1 gets the new register allocator, we just force all the
3084     // args into the right places (except the register args) and then, on the back side,
3085     // reload the register args properly if we take the slow path. Yuck
3086
3087 // These are proper for the calling convention
3088 store_parameter(length, 2);
3089 store_parameter(dst_pos, 1);
3090 store_parameter(dst, 0);
3091
3243 if (flags & LIR_OpArrayCopy::dst_range_check) {
3244 __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
3245 __ cmpl(tmp, dst_length_addr);
3246 __ jcc(Assembler::above, *stub->entry());
3247 }
3248
3249 if (flags & LIR_OpArrayCopy::length_positive_check) {
3250 __ testl(length, length);
3251 __ jcc(Assembler::less, *stub->entry());
3252 }
3253
3254 #ifdef _LP64
3255   __ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
3256   __ movl2ptr(dst_pos, dst_pos); // higher 32 bits must be zero
3257 #endif
3258
3259 if (flags & LIR_OpArrayCopy::type_check) {
3260     // We don't know whether the array types are compatible
3261 if (basic_type != T_OBJECT) {
3262 // Simple test for basic type arrays
3263 __ cmp_klass(src, dst, tmp, tmp2);
3264 __ jcc(Assembler::notEqual, *stub->entry());
3265 } else {
3266       // For object arrays, if src is a subclass of dst then we can
3267       // safely do the copy.
3268 Label cont, slow;
3269
3270 __ push(src);
3271 __ push(dst);
3272
3273 __ load_klass(src, src, tmp_load_klass);
3274 __ load_klass(dst, dst, tmp_load_klass);
3275
3276 __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
3277
3278 __ push(src);
3279 __ push(dst);
3280 __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
3281 __ pop(dst);
3282 __ pop(src);
3283
3403 __ pop(src);
3404 }
3405 }
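  // cmp_klass() above hides the object-header layout: it compares narrow klass words
  // when class pointers are compressed, full Klass* words otherwise, and with compact
  // object headers it first pulls the klass out of the mark word.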
3406
3407 #ifdef ASSERT
3408 if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
3409     // Sanity check the known type against the incoming classes. For the
3410     // primitive case the types must match exactly: src.klass and
3411     // dst.klass must each be exactly the default type. For the
3412     // object array case, if no type check is needed, then either the
3413     // dst type is exactly the expected type and the src type is a
3414     // subtype which we can't check, or src is the same array as dst
3415     // but not necessarily exactly of type default_type.
3416 Label known_ok, halt;
3417 __ mov_metadata(tmp, default_type->constant_encoding());
3418 #ifdef _LP64
3419 if (UseCompressedClassPointers) {
3420 __ encode_klass_not_null(tmp, rscratch1);
3421 }
3422 #endif
3423 if (basic_type != T_OBJECT) {
3424 __ cmp_klass(tmp, dst, tmp2);
3425 __ jcc(Assembler::notEqual, halt);
3426 __ cmp_klass(tmp, src, tmp2);
3427 __ jcc(Assembler::equal, known_ok);
3428 } else {
3429 __ cmp_klass(tmp, dst, tmp2);
3430 __ jcc(Assembler::equal, known_ok);
3431 __ cmpptr(src, dst);
3432 __ jcc(Assembler::equal, known_ok);
3433 }
3434 __ bind(halt);
3435 __ stop("incorrect type information in arraycopy");
3436 __ bind(known_ok);
3437 }
3438 #endif
3439
3440 #ifndef PRODUCT
3441 if (PrintC1Statistics) {
3442 __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)));
3443 }
3444 #endif
3445
3446 #ifdef _LP64
3447 assert_different_registers(c_rarg0, dst, dst_pos, length);
3448 __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
3449 assert_different_registers(c_rarg1, length);
3471 assert(op->crc()->is_single_cpu(), "crc must be register");
3472 assert(op->val()->is_single_cpu(), "byte value must be register");
3473 assert(op->result_opr()->is_single_cpu(), "result must be register");
3474 Register crc = op->crc()->as_register();
3475 Register val = op->val()->as_register();
3476 Register res = op->result_opr()->as_register();
3477
3478 assert_different_registers(val, crc, res);
3479
3480 __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
3481 __ notl(crc); // ~crc
3482 __ update_byte_crc32(crc, val, res);
3483 __ notl(crc); // ~crc
3484 __ mov(res, crc);
3485 }
3486
3487 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
3488 Register obj = op->obj_opr()->as_register(); // may not be an oop
3489 Register hdr = op->hdr_opr()->as_register();
3490 Register lock = op->lock_opr()->as_register();
3491 if (LockingMode == LM_MONITOR) {
3492 __ jmp(*op->stub()->entry());
3493 } else if (op->code() == lir_lock) {
3494 Register scratch = noreg;
3495 if (UseBiasedLocking || LockingMode == LM_LIGHTWEIGHT) {
3496 scratch = op->scratch_opr()->as_register();
3497 }
3498 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3499 // add debug info for NullPointerException only if one is possible
3500 int null_check_offset = __ lock_object(hdr, obj, lock, scratch, *op->stub()->entry());
3501 if (op->info() != NULL) {
3502 add_debug_info_for_null_check(null_check_offset, op->info());
3503 }
3504 // done
3505 } else if (op->code() == lir_unlock) {
3506 assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3507 __ unlock_object(hdr, obj, lock, *op->stub()->entry());
3508 } else {
3509 Unimplemented();
3510 }
3511 __ bind(*op->stub()->continuation());
3512 }
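// Locking dispatch above: LM_MONITOR always takes the slow-path stub, while the other
// locking modes try the inline lock_object()/unlock_object() fast paths first and only
// branch to the stub when the fast path fails. The scratch register is needed only for
// biased locking and for the lightweight-locking fast path.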
3513
3514 void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
3515 Register obj = op->obj()->as_pointer_register();
3516 Register result = op->result_opr()->as_pointer_register();
3517
3518 CodeEmitInfo* info = op->info();
3519 if (info != NULL) {
3520 add_debug_info_for_null_check_here(info);
3521 }
3522
3523 #ifdef _LP64
3524 if (UseCompactObjectHeaders) {
3525 Register tmp = rscratch1;
3526 assert_different_registers(tmp, obj);
3527 assert_different_registers(tmp, result);
3528
3529 // Check if we can take the (common) fast path, if obj is unlocked.
3530 __ movq(result, Address(obj, oopDesc::mark_offset_in_bytes()));
3531 __ testb(result, markWord::monitor_value);
3532 __ jcc(Assembler::notZero, *op->stub()->entry());
3533 __ bind(*op->stub()->continuation());
3534 // Fast-path: shift and decode Klass*.
3535 __ shrq(result, markWord::klass_shift);
3536 __ decode_klass_not_null(result, tmp);
3537 } else if (UseCompressedClassPointers) {
3538 __ movl(result, Address(obj, oopDesc::klass_offset_in_bytes()));
3539 __ decode_klass_not_null(result, rscratch1);
3540 } else
3541 #endif
3542 __ movptr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
3543 }
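// With compact object headers the Klass* lives in the upper bits of the mark word, so
// the fast path above just shifts and decodes it. If the mark currently points at an
// inflated monitor the real header is displaced, and the slow-path stub is taken to
// recover the klass; the remaining paths read the separate klass field as before.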
3544
3545 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
3546 ciMethod* method = op->profiled_method();
3547 int bci = op->profiled_bci();
3548 ciMethod* callee = op->profiled_callee();
3549 Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3550
3551 // Update counter for all call types
3552 ciMethodData* md = method->method_data_or_null();
3553 assert(md != NULL, "Sanity");
3554 ciProfileData* data = md->bci_to_data(bci);
3555 assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
3556 assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
3557 Register mdo = op->mdo()->as_register();
3648 #else
3649 assert_different_registers(obj, tmp, mdo_addr.base(), mdo_addr.index());
3650 #endif
3651 }
3652 #endif
3653 if (do_null) {
3654 __ testptr(obj, obj);
3655 __ jccb(Assembler::notZero, update);
3656 if (!TypeEntries::was_null_seen(current_klass)) {
3657 __ testptr(mdo_addr, TypeEntries::null_seen);
3658 #ifndef ASSERT
3659 __ jccb(Assembler::notZero, next); // already set
3660 #else
3661 __ jcc(Assembler::notZero, next); // already set
3662 #endif
3663 // atomic update to prevent overwriting Klass* with 0
3664 __ lock();
3665 __ orptr(mdo_addr, TypeEntries::null_seen);
3666 }
3667 if (do_update) {
3668 __ jmp(next);
3669 }
3670 #ifdef ASSERT
3671 } else {
3672 __ testptr(obj, obj);
3673 __ jcc(Assembler::notZero, update);
3674     __ stop("unexpected null obj");
3675 #endif
3676 }
3677
3678 __ bind(update);
3679
3680 if (do_update) {
3681 #ifdef ASSERT
3682 if (exact_klass != NULL) {
3683 Label ok;
3684 __ load_klass(tmp, obj, tmp_load_klass);
3685 __ push(tmp);
3686 __ mov_metadata(tmp, exact_klass->constant_encoding());
3687 __ cmpptr(tmp, Address(rsp, 0));
3688 __ jcc(Assembler::equal, ok);
3689 __ stop("exact klass and actual klass differ");
3690 __ bind(ok);
|