
src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

 203       __ push_oop(const_opr->as_jobject());
 204     } else if (const_opr->type() == T_INT) {
 205       __ push_jint(const_opr->as_jint());
 206     } else {
 207       ShouldNotReachHere();
 208     }
 209 
 210   } else {
 211     ShouldNotReachHere();
 212   }
 213 }
 214 
 215 void LIR_Assembler::pop(LIR_Opr opr) {
 216   if (opr->is_single_cpu()) {
 217     __ pop_reg(opr->as_register());
 218   } else {
 219     ShouldNotReachHere();
 220   }
 221 }
 222 
 223 bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
 224   return addr->base()->is_illegal() && addr->index()->is_illegal();
 225 }
 226 
 227 //-------------------------------------------
 228 
 229 Address LIR_Assembler::as_Address(LIR_Address* addr) {
 230   return as_Address(addr, rscratch1);
 231 }
 232 
 233 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
 234   if (addr->base()->is_illegal()) {
 235     assert(addr->index()->is_illegal(), "must be illegal too");
 236     AddressLiteral laddr((address)addr->disp(), relocInfo::none);
 237     if (! __ reachable(laddr)) {
 238       __ movptr(tmp, laddr.addr());
 239       Address res(tmp, 0);
 240       return res;
 241     } else {
 242       return __ as_Address(laddr);


2816   case lir_static_call:
2817   case lir_optvirtual_call:
2818   case lir_dynamic_call:
2819     offset += NativeCall::displacement_offset;
2820     break;
2821   case lir_icvirtual_call:
2822     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2823     break;
2824   case lir_virtual_call:  // currently, sparc-specific for niagara
2825   default: ShouldNotReachHere();
2826   }
2827   __ align(BytesPerWord, offset);
2828 }
2829 
2830 
2831 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2832   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2833          "must be aligned");
2834   __ call(AddressLiteral(op->addr(), rtype));
2835   add_call_info(code_offset(), op->info());
2836 }
2837 
2838 
2839 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2840   __ ic_call(op->addr());
2841   add_call_info(code_offset(), op->info());
2842   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2843          "must be aligned");
2844 }
2845 
2846 
2847 /* Currently, vtable-dispatch is only enabled for sparc platforms */
2848 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2849   ShouldNotReachHere();
2850 }
2851 
2852 
2853 void LIR_Assembler::emit_static_call_stub() {
2854   address call_pc = __ pc();
2855   address stub = __ start_a_stub(call_stub_size());
2856   if (stub == NULL) {
2857     bailout("static call stub overflow");
2858     return;
2859   }
2860 
2861   int start = __ offset();
2862 
2863   // make sure that the displacement word of the call ends up word aligned


3821   if (patch_code != lir_patch_none) {
3822     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
3823   }
3824 
3825   Register reg = dest->as_pointer_register();
3826   LIR_Address* addr = src->as_address_ptr();
3827   __ lea(reg, as_Address(addr));
3828 
3829   if (patch != NULL) {
3830     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
3831   }
3832 }
3833 
3834 
3835 
3836 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3837   assert(!tmp->is_valid(), "don't need temporary");
3838   __ call(RuntimeAddress(dest));
3839   if (info != NULL) {
3840     add_call_info_here(info);
3841   }
3842 }
3843 
3844 
3845 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3846   assert(type == T_LONG, "only for volatile long fields");
3847 
3848   if (info != NULL) {
3849     add_debug_info_for_null_check_here(info);
3850   }
3851 
3852   if (src->is_double_xmm()) {
3853     if (dest->is_double_cpu()) {
3854 #ifdef _LP64
3855       __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3856 #else
3857       __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
3858       __ psrlq(src->as_xmm_double_reg(), 32);
3859       __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
3860 #endif // _LP64
3861     } else if (dest->is_double_stack()) {




 203       __ push_oop(const_opr->as_jobject());
 204     } else if (const_opr->type() == T_INT) {
 205       __ push_jint(const_opr->as_jint());
 206     } else {
 207       ShouldNotReachHere();
 208     }
 209 
 210   } else {
 211     ShouldNotReachHere();
 212   }
 213 }
 214 
 215 void LIR_Assembler::pop(LIR_Opr opr) {
 216   if (opr->is_single_cpu()) {
 217     __ pop_reg(opr->as_register());
 218   } else {
 219     ShouldNotReachHere();
 220   }
 221 }
 222 
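As an aside, pop() above follows C1's usual operand-kind dispatch: handle the kinds the platform supports and fail fast on anything else. A minimal standalone sketch of the same pattern, using hypothetical Opr/Kind types rather than the real LIR_Opr class:

#include <cassert>
#include <cstdio>

enum class Kind { single_cpu, constant };

struct Opr {
  Kind kind;
  int  reg;  // meaningful only for Kind::single_cpu
};

void pop(const Opr& opr) {
  if (opr.kind == Kind::single_cpu) {
    std::printf("pop r%d\n", opr.reg);      // stands in for __ pop_reg(...)
  } else {
    assert(false && "ShouldNotReachHere");  // unsupported operand kind
  }
}

int main() { pop(Opr{Kind::single_cpu, 3}); return 0; }
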
 223 void LIR_Assembler::getfp(LIR_Opr opr) {
 224   __ lea(opr->as_register_lo(), Address(rsp, initial_frame_size_in_bytes() + wordSize)); // the + wordSize seems to be needed to account for the push of rbp before the sub of rsp
 225 }
 226 
 227 void LIR_Assembler::getsp(LIR_Opr opr) {
 228   __ movptr(opr->as_register_lo(), rsp);
 229 }
 230 
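getsp() simply copies rsp, while getfp() computes the frame base as rsp plus the fixed frame size plus one word for the rbp pushed in the prologue. A hedged, standalone illustration of the same arithmetic in plain C++ (GCC/Clang x86-64 inline asm; frame_size is a made-up stand-in for initial_frame_size_in_bytes()):

#include <cstdint>
#include <cstdio>

static inline uintptr_t read_sp() {
  uintptr_t sp;
  asm volatile("mov %%rsp, %0" : "=r"(sp));  // same effect as movptr(dst, rsp)
  return sp;
}

int main() {
  const uintptr_t frame_size = 32;                 // hypothetical fixed frame size
  uintptr_t sp = read_sp();
  uintptr_t fp = sp + frame_size + sizeof(void*);  // + wordSize for the pushed rbp
  std::printf("sp=%p fp=%p\n", (void*)sp, (void*)fp);
  return 0;
}
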
 231 #if 0
 232 void LIR_Assembler::getpc(LIR_Opr opr) {
 233   const char *name = "cont_getPC";
 234   address entry = StubRoutines::cont_getPC();
 235   __ call_VM_leaf(entry, 0);
 236   __ movptr(opr->as_register_lo(), rax);
 237 }
 238 #endif
 239 
 240 bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
 241   return addr->base()->is_illegal() && addr->index()->is_illegal();
 242 }
 243 
 244 //-------------------------------------------
 245 
 246 Address LIR_Assembler::as_Address(LIR_Address* addr) {
 247   return as_Address(addr, rscratch1);
 248 }
 249 
 250 Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
 251   if (addr->base()->is_illegal()) {
 252     assert(addr->index()->is_illegal(), "must be illegal too");
 253     AddressLiteral laddr((address)addr->disp(), relocInfo::none);
 254     if (! __ reachable(laddr)) {
 255       __ movptr(tmp, laddr.addr());
 256       Address res(tmp, 0);
 257       return res;
 258     } else {
 259       return __ as_Address(laddr);

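The reachable() test above exists because x86-64 addressing can only encode a signed 32-bit displacement; a literal address outside the +/-2GB window around the code must first be materialized into the tmp register. A hedged sketch of that decision (hypothetical helper, not the MacroAssembler API):

#include <cstdint>

// Does target fit in a signed 32-bit displacement relative to code_addr?
bool fits_in_disp32(uintptr_t target, uintptr_t code_addr) {
  intptr_t disp = (intptr_t)target - (intptr_t)code_addr;
  return disp == (intptr_t)(int32_t)disp;  // survives a round-trip through 32 bits
}
// If it does not fit, emit movptr(tmp, target) and address memory as
// [tmp + 0], which is exactly the movptr/Address(tmp, 0) path above.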

2833   case lir_static_call:
2834   case lir_optvirtual_call:
2835   case lir_dynamic_call:
2836     offset += NativeCall::displacement_offset;
2837     break;
2838   case lir_icvirtual_call:
2839     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2840     break;
2841   case lir_virtual_call:  // currently, sparc-specific for niagara
2842   default: ShouldNotReachHere();
2843   }
2844   __ align(BytesPerWord, offset);
2845 }
2846 
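The align(BytesPerWord, offset) call pads the instruction stream so that the 4-byte call displacement lands on a word-aligned offset, which lets the runtime patch the call target atomically later. A hedged model of the padding computation (offset_to_disp stands in for NativeCall::displacement_offset, which is 1 on x86 since the rel32 follows the 0xE8 opcode byte):

#include <cassert>

int padding_before_call(int code_offset, int offset_to_disp, int word = 8) {
  int rem = (code_offset + offset_to_disp) % word;
  return rem == 0 ? 0 : word - rem;  // bytes of nop padding to emit first
}

int main() {
  int off = 13;                        // example: current code offset
  off += padding_before_call(off, 1);  // pad before a direct call
  assert((off + 1) % 8 == 0);          // displacement is now word-aligned
  return 0;
}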
2847 
2848 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2849   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2850          "must be aligned");
2851   __ call(AddressLiteral(op->addr(), rtype));
2852   add_call_info(code_offset(), op->info());
2853   __ oopmap_metadata(op->info());
2854   __ post_call_nop();
2855 }
2856 
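add_call_info() and the new oopmap_metadata() appear to record per-call-site debug and oop information keyed by the call's return address, and post_call_nop() (added in this change) appears to emit a marker after the call for the continuation machinery. A hedged sketch of the underlying idea of a return-PC-keyed table (hypothetical types, not HotSpot's):

#include <cstdint>
#include <map>

struct CallSiteInfo { int oopmap_id; int bci; };  // hypothetical payload

class CallSiteTable {
  std::map<uint32_t, CallSiteInfo> sites_;  // keyed by code offset of the return PC
public:
  void add(uint32_t ret_offset, const CallSiteInfo& info) { sites_[ret_offset] = info; }
  const CallSiteInfo* find(uint32_t ret_offset) const {
    auto it = sites_.find(ret_offset);
    return it == sites_.end() ? nullptr : &it->second;
  }
};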
2857 
2858 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2859   __ ic_call(op->addr());
2860   add_call_info(code_offset(), op->info());
2861   __ oopmap_metadata(op->info());
2862   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2863          "must be aligned");
2864   __ post_call_nop();
2865 }
2866 
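The assertion in ic_call() is the post-call twin of the one in call(): after emission, offset() is one instruction_size past the call, so stepping back by instruction_size and forward by displacement_offset must land on the aligned displacement. A hedged arithmetic check (5 and 1 are the x86 values for a direct CALL rel32, used here purely for illustration):

#include <cassert>

int main() {
  const int instruction_size    = 5;  // 0xE8 opcode + 4-byte rel32
  const int displacement_offset = 1;  // rel32 starts right after the opcode
  const int bytes_per_word      = 8;
  int offset_after_call = 12;         // example: the call started at offset 7
  assert((offset_after_call - instruction_size + displacement_offset)
         % bytes_per_word == 0);      // the displacement sat at offset 8
  return 0;
}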
2867 
2868 /* Currently, vtable-dispatch is only enabled for sparc platforms */
2869 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
2870   ShouldNotReachHere();
2871 }
2872 
2873 
2874 void LIR_Assembler::emit_static_call_stub() {
2875   address call_pc = __ pc();
2876   address stub = __ start_a_stub(call_stub_size());
2877   if (stub == NULL) {
2878     bailout("static call stub overflow");
2879     return;
2880   }
2881 
2882   int start = __ offset();
2883 
2884   // make sure that the displacement word of the call ends up word aligned

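In emit_static_call_stub() above, start_a_stub() reserves space for the out-of-line stub and returns NULL when the code buffer is exhausted, in which case the compile bails out cleanly instead of overflowing. A hedged sketch of that reserve-or-fail pattern (hypothetical buffer type, not HotSpot's CodeBuffer API):

#include <cstddef>
#include <cstdint>

struct CodeBuf {
  uint8_t* pos;
  uint8_t* end;
  uint8_t* try_reserve(size_t n) {      // analogous to start_a_stub(size)
    if (pos + n > end) return nullptr;  // no room: caller must bail out
    uint8_t* p = pos;
    pos += n;
    return p;
  }
};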

3842   if (patch_code != lir_patch_none) {
3843     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
3844   }
3845 
3846   Register reg = dest->as_pointer_register();
3847   LIR_Address* addr = src->as_address_ptr();
3848   __ lea(reg, as_Address(addr));
3849 
3850   if (patch != NULL) {
3851     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
3852   }
3853 }
3854 
3855 
3856 
3857 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3858   assert(!tmp->is_valid(), "don't need temporary");
3859   __ call(RuntimeAddress(dest));
3860   if (info != NULL) {
3861     add_call_info_here(info);
3862     __ oopmap_metadata(info);
3863   }
3864   __ post_call_nop();
3865 }
3866 
3867 
3868 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3869   assert(type == T_LONG, "only for volatile long fields");
3870 
3871   if (info != NULL) {
3872     add_debug_info_for_null_check_here(info);
3873   }
3874 
3875   if (src->is_double_xmm()) {
3876     if (dest->is_double_cpu()) {
3877 #ifdef _LP64
3878       __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3879 #else
3880       __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
3881       __ psrlq(src->as_xmm_double_reg(), 32);
3882       __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
3883 #endif // _LP64
3884     } else if (dest->is_double_stack()) {


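On 32-bit (the #else path above), the 64-bit value held in the XMM register is split into two GPR halves: movdl takes the low 32 bits, psrlq shifts the register right by 32, and a second movdl takes the high half. The same dance expressed with SSE2 intrinsics, as a hedged standalone check:

#include <cstdint>
#include <cstdio>
#include <emmintrin.h>

int main() {
  __m128i v = _mm_set_epi64x(0, 0x1122334455667788LL);
  uint32_t lo = (uint32_t)_mm_cvtsi128_si32(v);  // movdl: low 32 bits
  v = _mm_srli_epi64(v, 32);                     // psrlq: logical shift right 32
  uint32_t hi = (uint32_t)_mm_cvtsi128_si32(v);  // movdl: high 32 bits
  std::printf("hi=%08x lo=%08x\n", hi, lo);      // prints hi=11223344 lo=55667788
  return 0;
}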