  return as_Address(addr);
}


void LIR_Assembler::osr_entry() {
  offsets()->set_value(CodeOffsets::OSR_Entry, code_offset());
  BlockBegin* osr_entry = compilation()->hir()->osr_entry();
  ValueStack* entry_state = osr_entry->state();
  int number_of_locks = entry_state->locks_size();

  // we jump here if osr happens with the interpreter
  // state set up to continue at the beginning of the
  // loop that triggered osr - in particular, we have
  // the following registers set up:
  //
  // rcx: osr buffer
  //

  // build frame
  ciMethod* m = compilation()->method();
  __ build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes(), compilation()->max_monitors());

  // OSR buffer is
  //
  // locals[nlocals-1..0]
  // monitors[0..number_of_locks]
  //
  // locals is a direct copy of the interpreter frame, so in the osr buffer
  // the first slot in the local array is the last local from the interpreter
  // and the last slot is local[0] (the receiver) from the interpreter
  //
  // Similarly with locks. The first lock slot in the osr buffer is the nth lock
  // from the interpreter frame, and the nth lock slot in the osr buffer is the
  // 0th lock in the interpreter frame (the method lock, if a sync method)
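  //
  // For illustration (hypothetical numbers, not taken from this method):
  // with nlocals == 3, the buffer-to-frame mapping is
  //
  //   osr_buf slot 0 -> interpreter local[2]
  //   osr_buf slot 1 -> interpreter local[1]
  //   osr_buf slot 2 -> interpreter local[0]  (the receiver)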

  // Initialize monitors in the compiled activation.
  // rcx: pointer to osr buffer
  //
  // All other registers are dead at this point and the locals will be
  // copied into place by code emitted in the IR.

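  // A minimal sketch of the monitor-copy loop that follows (elided from this
  // excerpt); the exact offsets are an assumption, only the shape is suggested:
  //
  //   for (int i = 0; i < number_of_locks; i++) {
  //     int slot_offset = monitor_offset - i * BasicObjectLock::size() * BytesPerWord;
  //     // move the lock and object words from the osr buffer (rcx)
  //     // into this frame's monitor slot i
  //   }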
// ...

      (!UseFastNewObjectArray && is_reference_type(op->type())) ||
      (!UseFastNewTypeArray && !is_reference_type(op->type()))) {
    __ jmp(*op->stub()->entry());
  } else {
    Register tmp1 = op->tmp1()->as_register();
    Register tmp2 = op->tmp2()->as_register();
    Register tmp3 = op->tmp3()->as_register();
    if (len == tmp1) {
      tmp1 = tmp3;
    } else if (len == tmp2) {
      tmp2 = tmp3;
    } else if (len == tmp3) {
      // everything is ok
    } else {
      __ mov(tmp3, len);
    }
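    // Invariant after the shuffle above: neither scratch register handed to
    // allocate_array aliases len (len either already sits in tmp3, was copied
    // there, or the tmp it collided with was renamed to tmp3).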
    __ allocate_array(op->obj()->as_register(),
                      len,
                      tmp1,
                      tmp2,
                      arrayOopDesc::base_offset_in_bytes(op->type()),
                      array_element_size(op->type()),
                      op->klass()->as_register(),
                      *op->stub()->entry());
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::type_profile_helper(Register mdo,
                                        ciMethodData *md, ciProfileData *data,
                                        Register recv, Label* update_done) {
  for (uint i = 0; i < ReceiverTypeData::row_limit(); i++) {
    Label next_test;
    // See if the receiver is receiver[n].
    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
    __ jccb(Assembler::notEqual, next_test);
    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
    __ addptr(data_addr, DataLayout::counter_increment);
    __ jmp(*update_done);
    __ bind(next_test);
  }
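  // Each row of the receiver-type profile caches one receiver klass plus a hit
  // count: on a match the count is bumped and control jumps to update_done, on
  // a miss the next row is tried. (The tail of the helper, which claims an
  // empty row for a previously unseen receiver, is elided from this excerpt.)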

// ...

void LIR_Assembler::store_parameter(Metadata* m, int offset_from_rsp_in_words) {
  assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
  int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
  assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
  __ mov_metadata(Address(rsp, offset_from_rsp_in_bytes), m, rscratch1);
}
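
// For example (LP64, BytesPerWord == 8): store_parameter(m, 2) writes the
// metadata word at [rsp + 16], the third word of the reserved argument area.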


// This code replaces a call to arraycopy; no exceptions may be thrown in this
// code, they must be thrown in the System.arraycopy activation frame; we could
// save some checks if this were not the case
void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
  ciArrayKlass* default_type = op->expected_type();
  Register src = op->src()->as_register();
  Register dst = op->dst()->as_register();
  Register src_pos = op->src_pos()->as_register();
  Register dst_pos = op->dst_pos()->as_register();
  Register length = op->length()->as_register();
  Register tmp = op->tmp()->as_register();
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
  Register tmp2 = UseCompactObjectHeaders ? rscratch2 : noreg;

  CodeStub* stub = op->stub();
  int flags = op->flags();
  BasicType basic_type = default_type != NULL ? default_type->element_type()->basic_type() : T_ILLEGAL;
  if (is_reference_type(basic_type)) basic_type = T_OBJECT;

  // if we don't know anything, just go through the generic arraycopy
  if (default_type == NULL) {
    // save outgoing arguments on stack in case call to System.arraycopy is needed
    // HACK ALERT. This code used to push the parameters in a hardwired fashion
    // for interpreter calling conventions. Now we have to do it in new style conventions.
    // For the moment until C1 gets the new register allocator I just force all the
    // args to the right place (except the register args) and then on the back side
    // reload the register args properly if we go slow path. Yuck

    // These are proper for the calling convention
    store_parameter(length, 2);
    store_parameter(dst_pos, 1);
    store_parameter(dst, 0);
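
    // Resulting layout in the reserved area (LP64 word size assumed):
    //   [rsp + 0]  = dst
    //   [rsp + 8]  = dst_pos
    //   [rsp + 16] = length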

  // ...

  if (flags & LIR_OpArrayCopy::dst_range_check) {
    __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
    __ cmpl(tmp, dst_length_addr);
    __ jcc(Assembler::above, *stub->entry());
  }

  if (flags & LIR_OpArrayCopy::length_positive_check) {
    __ testl(length, length);
    __ jcc(Assembler::less, *stub->entry());
  }
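
  // Note: Assembler::above is an unsigned compare, so a dst_pos + length sum
  // that either exceeds dst.length or wraps around fails in a single branch,
  // while the length check uses the signed Assembler::less to catch negative
  // lengths.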

#ifdef _LP64
  __ movl2ptr(src_pos, src_pos); // higher 32 bits must be zero
  __ movl2ptr(dst_pos, dst_pos); // higher 32 bits must be zero
#endif

  if (flags & LIR_OpArrayCopy::type_check) {
    // We don't know whether the array types are compatible
    if (basic_type != T_OBJECT) {
      // Simple test for basic type arrays
      __ cmp_klass(src, dst, tmp, tmp2);
      __ jcc(Assembler::notEqual, *stub->entry());
    } else {
      // For object arrays, if src is a subclass of dst then we can
      // safely do the copy.
      Label cont, slow;

      __ push(src);
      __ push(dst);
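      // The pushes save the original array oops: load_klass below overwrites
      // src and dst with their klass pointers.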

      __ load_klass(src, src, tmp_load_klass);
      __ load_klass(dst, dst, tmp_load_klass);

      __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);

      __ push(src);
      __ push(dst);
      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
      __ pop(dst);
      __ pop(src);
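      // The slow-subtype stub takes its two klass arguments on the stack and
      // presumably leaves a boolean result in one of those slots for the pops
      // above to retrieve; the test of that result is in the elided code below.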

      // ...

      __ pop(src);
    }
  }

#ifdef ASSERT
  if (basic_type != T_OBJECT || !(flags & LIR_OpArrayCopy::type_check)) {
    // Sanity check the known type with the incoming class. For the
    // primitive case the types must match exactly with src.klass and
    // dst.klass each exactly matching the default type. For the
    // object array case, if no type check is needed then either the
    // dst type is exactly the expected type and the src type is a
    // subtype which we can't check, or src is the same array as dst
    // but not necessarily exactly of type default_type.
    Label known_ok, halt;
    __ mov_metadata(tmp, default_type->constant_encoding());
#ifdef _LP64
    if (UseCompressedClassPointers) {
      __ encode_klass_not_null(tmp, rscratch1);
    }
#endif
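    // tmp now holds the expected klass, compressed iff the runtime compares
    // compressed class pointers, so cmp_klass below can match it directly
    // against each array's klass.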
    if (basic_type != T_OBJECT) {
      __ cmp_klass(tmp, dst, tmp2);
      __ jcc(Assembler::notEqual, halt);
      __ cmp_klass(tmp, src, tmp2);
      __ jcc(Assembler::equal, known_ok);
    } else {
      __ cmp_klass(tmp, dst, tmp2);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
    }
    __ bind(halt);
    __ stop("incorrect type information in arraycopy");
    __ bind(known_ok);
  }
#endif

#ifndef PRODUCT
  if (PrintC1Statistics) {
    __ incrementl(ExternalAddress(Runtime1::arraycopy_count_address(basic_type)), rscratch1);
  }
#endif

#ifdef _LP64
  assert_different_registers(c_rarg0, dst, dst_pos, length);
  __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
  assert_different_registers(c_rarg1, length);
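  // For illustration: for a T_INT array under the default compressed-class
  // layout, scale is times_4 and base_offset_in_bytes(T_INT) is 16, so c_rarg0
  // becomes src + 4 * src_pos + 16, the address of element src[src_pos].
  // (The base offset differs under other header layouts.)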

// ...

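  // CRC-32 pre/post-conditioning: the table-driven update is defined on the
  // bitwise complement of the running CRC, so the sequence below computes
  // crc = ~update_byte_crc32(~crc, val) before moving the result into res.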
  __ lea(res, ExternalAddress(StubRoutines::crc_table_addr()));
  __ notl(crc); // ~crc
  __ update_byte_crc32(crc, val, res);
  __ notl(crc); // ~crc
  __ mov(res, crc);
}

void LIR_Assembler::emit_lock(LIR_OpLock* op) {
  Register obj = op->obj_opr()->as_register();  // may not be an oop
  Register hdr = op->hdr_opr()->as_register();
  Register lock = op->lock_opr()->as_register();
  if (UseHeavyMonitors) {
    if (op->info() != NULL) {
      add_debug_info_for_null_check_here(op->info());
      __ null_check(obj);
    }
    __ jmp(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    Register tmp = UseFastLocking ? op->scratch_opr()->as_register() : noreg;
    // add debug info for NullPointerException only if one is possible
    int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
    // done
  } else if (op->code() == lir_unlock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    __ unlock_object(hdr, obj, lock, *op->stub()->entry());
  } else {
    Unimplemented();
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
  Register obj = op->obj()->as_pointer_register();
  Register result = op->result_opr()->as_pointer_register();

  CodeEmitInfo* info = op->info();
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

#ifdef _LP64
  if (UseCompactObjectHeaders) {
    Register tmp = rscratch1;
    assert_different_registers(tmp, obj);
    assert_different_registers(tmp, result);

    // Check if we can take the (common) fast path, if obj is unlocked.
    __ movq(result, Address(obj, oopDesc::mark_offset_in_bytes()));
    __ testb(result, markWord::monitor_value);
    __ jcc(Assembler::notZero, *op->stub()->entry());
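    // With compact headers the compressed Klass* sits in the upper bits of the
    // mark word. A monitor-locked object has a monitor pointer there instead,
    // so the slow path is taken; presumably the stub recovers the displaced
    // header into result so the shift/decode below still applies on return.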
    __ bind(*op->stub()->continuation());
    // Fast-path: shift and decode Klass*.
    __ shrq(result, markWord::klass_shift);
    __ decode_klass_not_null(result, tmp);
  } else if (UseCompressedClassPointers) {
    __ movl(result, Address(obj, oopDesc::klass_offset_in_bytes()));
    __ decode_klass_not_null(result, rscratch1);
  } else
#endif
  __ movptr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
}


void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();
  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");

  // ...