int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
  int load_offset;
  if (!Assembler::is_simm16(offset)) {
    // For offsets larger than a simm16 we set up the offset in a register.
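    // R0 serves as a scratch register for the materialized offset; the access itself
    // is then emitted by the register+register form of load().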
    __ load_const_optimized(R0, offset);
    load_offset = load(base, R0, to_reg, type, wide);
  } else {
    load_offset = code_offset();
    switch(type) {
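      // PPC has no sign-extending byte load, so T_BYTE is loaded with lbz (zero-extend)
      // and then sign-extended with extsb. T_CHAR is unsigned and lhz suffices, while
      // T_SHORT and T_INT use the algebraic (sign-extending) forms lha and lwa.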
      case T_BOOLEAN: // fall through
      case T_BYTE  : __ lbz(to_reg->as_register(), offset, base);
                     __ extsb(to_reg->as_register(), to_reg->as_register()); break;
      case T_CHAR  : __ lhz(to_reg->as_register(), offset, base); break;
      case T_SHORT : __ lha(to_reg->as_register(), offset, base); break;
      case T_INT   : __ lwa(to_reg->as_register(), offset, base); break;
      case T_LONG  : __ ld(to_reg->as_register_lo(), offset, base); break;
      case T_METADATA: __ ld(to_reg->as_register(), offset, base); break;
      case T_ADDRESS:
        __ ld(to_reg->as_register(), offset, base);
        break;
      case T_ARRAY : // fall through
      case T_OBJECT:
      {
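        // With compressed oops (and no wide access requested), the field holds a 32-bit
        // narrow oop which has to be decoded into a full pointer after the load.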
        if (UseCompressedOops && !wide) {
          __ lwz(to_reg->as_register(), offset, base);
          __ decode_heap_oop(to_reg->as_register());
        } else {
          __ ld(to_reg->as_register(), offset, base);
        }
        __ verify_oop(to_reg->as_register(), FILE_AND_LINE);
        break;
      }
      case T_FLOAT:  __ lfs(to_reg->as_float_reg(), offset, base); break;
      case T_DOUBLE: __ lfd(to_reg->as_double_reg(), offset, base); break;
      default      : ShouldNotReachHere();
    }
  }
  return load_offset;
}
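
// Rough sketch of what the two paths above emit for a T_INT load. This assumes the
// register+register form of load() uses the indexed instruction variants; it is
// illustrative only, not taken from the generated code:
//
//   simm16 offset:   lwa   Rto, offset(Rbase)
//   larger offset:   <load_const_optimized R0, offset>   (e.g. li or lis/ori)
//                    lwax  Rto, Rbase, R0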

// ... (intervening code not shown; the excerpt resumes mid-function in the lock/unlock emitter) ...

      __ b(*op->stub()->entry());
    }
  } else {
    assert (op->code() == lir_unlock, "Invalid code, expected lir_unlock");
    if (UseFastLocking) {
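      // unlock_object falls through on success and branches to the slow-path stub otherwise.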
      assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
      __ unlock_object(hdr, obj, lock, *op->stub()->entry());
    } else {
      // always do slow unlocking
      // note: The slow unlocking code could be inlined here, however if we use
      //       slow unlocking, speed doesn't matter anyway and this solution is
      //       simpler and requires less duplicated code - additionally, the
      //       slow unlocking code is the same in either case which simplifies
      //       debugging.
      __ b(*op->stub()->entry());
    }
  }
  __ bind(*op->stub()->continuation());
}

void LIR_Assembler::emit_load_klass(LIR_OpLoadKlass* op) {
  Register obj = op->obj()->as_pointer_register();
  Register result = op->result_opr()->as_pointer_register();

  CodeEmitInfo* info = op->info();
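  // If the zero page is not read-protected or implicit null checks are disabled,
  // emit an explicit null check; otherwise the klass load itself acts as the
  // implicit check and we only record debug info for it here.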
  if (info != NULL) {
    if (!os::zero_page_read_protected() || !ImplicitNullChecks) {
      explicit_null_check(obj, info);
    } else {
      add_debug_info_for_null_check_here(info);
    }
  }

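  // The klass field is a 32-bit narrow value when compressed class pointers are
  // enabled and must be decoded; otherwise it is a full-width Klass*.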
  if (UseCompressedClassPointers) {
    __ lwz(result, oopDesc::klass_offset_in_bytes(), obj);
    __ decode_klass_not_null(result);
  } else {
    __ ld(result, oopDesc::klass_offset_in_bytes(), obj);
  }
}

void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
  ciMethod* method = op->profiled_method();
  int bci = op->profiled_bci();
  ciMethod* callee = op->profiled_callee();

  // Update counter for all call types.
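  // The caller is expected to already have a MethodData object, and the profile slot
  // for this bci must be (at least) a CounterData, as the asserts below check.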
  ciMethodData* md = method->method_data_or_null();
  assert(md != NULL, "Sanity");
  ciProfileData* data = md->bci_to_data(bci);
  assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
  assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
  Register mdo = op->mdo()->as_register();
#ifdef _LP64
  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register_lo();
#else
  assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
  Register tmp1 = op->tmp1()->as_register();
#endif