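// Excerpt from what appears to be a side-by-side C1 review diff for the ARM
// port (c1_LIRAssembler_arm.cpp is assumed): two versions of the same hunks
// follow, the original first and the updated one second.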
  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

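  // Load from memory at 'addr' into 'dest', dispatched on the basic type.
  // Object and array references may be stored as 32-bit narrow oops when
  // compressed oops are in use, so they take a zero-extending 32-bit load
  // (ldr_u32); the narrow oop is presumably decoded after this switch.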
  switch (type) {
    case T_OBJECT: // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ ldr_u32(dest->as_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_register(), as_Address(addr));
      }
      break;

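    // In this first version of the hunk, T_ADDRESS special-cases a load from
    // the klass offset: with compressed class pointers the klass field holds
    // a 32-bit narrow value. The second version below drops this and moves
    // the klass load into a dedicated emit_load_klass().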
    case T_ADDRESS:
      if (UseCompressedClassPointers && addr->disp() == oopDesc::klass_offset_in_bytes()) {
        __ ldr_u32(dest->as_pointer_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_pointer_register(), as_Address(addr));
      }
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

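    // Sub-word types use the load that matches Java semantics: boolean is
    // zero-extended (ldrb), byte is sign-extended (ldrsb), and char, an
    // unsigned 16-bit type in Java, is zero-extended (ldrh).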
    case T_BOOLEAN:
      __ ldrb(dest->as_register(), as_Address(addr));
      break;

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(addr));
      break;

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(addr));
      break;
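
  // [hunk boundary: the excerpt jumps ahead here into what appears to be
  //  LIR_Assembler::emit_lock; 'hdr' and 'obj' are presumably set up from
  //  the LIR operands in the elided lines]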
  Register lock = op->lock_opr()->as_pointer_register();
  Register tmp = op->scratch_opr()->is_illegal() ? noreg :
                 op->scratch_opr()->as_pointer_register();

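  // Without fast locking, always take the slow path through the runtime stub.
  // Otherwise emit the inline fast path; lock_object/unlock_object branch to
  // the stub entry themselves if the inline path fails.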
  if (!UseFastLocking) {
    __ b(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
  } else if (op->code() == lir_unlock) {
    __ unlock_object(hdr, obj, lock, tmp, *op->stub()->entry());
  } else {
    ShouldNotReachHere();
  }
  __ bind(*op->stub()->continuation());
}

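// Profiling for a call site: bump the counter in this bci's MethodData entry.
// ARM load/store immediate offsets only reach 4KB, so when the counter slot
// is too far from the MDO base the code below biases 'mdo' toward the slot.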
void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  assert(op->tmp1()->is_register(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_pointer_register();
  assert_different_registers(mdo, tmp1);
  __ mov_metadata(mdo, md->constant_encoding());
  int mdo_offset_bias = 0;
  int max_offset = 4096;
  if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) {
  // [first version of the excerpt ends here, truncated mid-function]
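
// Second copy of the same code: apparently the updated version of the file,
// in which the klass-load special case is gone from T_ADDRESS and a new
// emit_load_klass() (for LIR_OpLoadKlass) handles loading the klass instead.
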
  PatchingStub* patch = NULL;
  if (patch_code != lir_patch_none) {
    patch = new PatchingStub(_masm, PatchingStub::access_field_id);
  }
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  switch (type) {
    case T_OBJECT: // fall through
    case T_ARRAY:
      if (UseCompressedOops && !wide) {
        __ ldr_u32(dest->as_register(), as_Address(addr));
      } else {
        __ ldr(dest->as_register(), as_Address(addr));
      }
      break;

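    // T_ADDRESS is now a plain pointer-sized load; the klass-offset special
    // case that used to live here is handled by emit_load_klass() below.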
    case T_ADDRESS:
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_INT:
#ifdef __SOFTFP__
    case T_FLOAT:
#endif // __SOFTFP__
      __ ldr(dest->as_pointer_register(), as_Address(addr));
      break;

    case T_BOOLEAN:
      __ ldrb(dest->as_register(), as_Address(addr));
      break;

    case T_BYTE:
      __ ldrsb(dest->as_register(), as_Address(addr));
      break;

    case T_CHAR:
      __ ldrh(dest->as_register(), as_Address(addr));
      break;
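
  // [hunk boundary: as in the first copy, the excerpt jumps ahead here into
  //  what appears to be LIR_Assembler::emit_lock]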
  Register lock = op->lock_opr()->as_pointer_register();
  Register tmp = op->scratch_opr()->is_illegal() ? noreg :
                 op->scratch_opr()->as_pointer_register();

  if (!UseFastLocking) {
    __ b(*op->stub()->entry());
  } else if (op->code() == lir_lock) {
    assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
    int null_check_offset = __ lock_object(hdr, obj, lock, tmp, *op->stub()->entry());
    if (op->info() != NULL) {
      add_debug_info_for_null_check(null_check_offset, op->info());
    }
  } else if (op->code() == lir_unlock) {
    __ unlock_object(hdr, obj, lock, tmp, *op->stub()->entry());
  } else {
    ShouldNotReachHere();
  }
  __ bind(*op->stub()->continuation());
}

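// New in this version: loading an object's klass is a separate LIR op. The
// "On 32 bit arm??" question below appears to come from the patch itself:
// UseCompressedClassPointers is a 64-bit feature, so the ldr_u32 branch is
// presumably dead on 32-bit ARM.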
void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
  Register obj = op->obj()->as_pointer_register();
  Register result = op->result_opr()->as_pointer_register();

  CodeEmitInfo* info = op->info();
  if (info != NULL) {
    add_debug_info_for_null_check_here(info);
  }

  if (UseCompressedClassPointers) { // On 32 bit arm??
    __ ldr_u32(result, Address(obj, oopDesc::klass_offset_in_bytes()));
  } else {
    __ ldr(result, Address(obj, oopDesc::klass_offset_in_bytes()));
  }
}

void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
  assert(op->tmp1()->is_register(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_pointer_register();
  assert_different_registers(mdo, tmp1);
  __ mov_metadata(mdo, md->constant_encoding());
  int mdo_offset_bias = 0;
  int max_offset = 4096;
  if (md->byte_offset_of_slot(data, CounterData::count_offset()) + data->size_in_bytes() >= max_offset) {
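  // [the excerpt ends inside this branch; presumably it biases 'mdo' by the
  //  counter slot's byte offset so that subsequent loads and stores of the
  //  profile data fit within the 4KB immediate-offset range]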