old/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

 428   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
 429   __ end_a_stub();
 430 
 431   return offset;
 432 }
 433 
 434 
 435 // Emit the code to remove the frame from the stack in the exception
 436 // unwind path.
 437 int LIR_Assembler::emit_unwind_handler() {
 438 #ifndef PRODUCT
 439   if (CommentedAssembly) {
 440     _masm->block_comment("Unwind handler");
 441   }
 442 #endif
 443 
 444   int offset = code_offset();
 445 
 446   // Fetch the exception from TLS and clear out exception related thread state
 447   Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
 448   NOT_LP64(__ get_thread(rsi));
 449   __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
 450   __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
 451   __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);
 452 
 453   __ bind(_unwind_handler_entry);
 454   __ verify_not_null_oop(rax);
 455   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 456     __ mov(rbx, rax);  // Preserve the exception (rbx is always callee-saved)
 457   }
 458 
 459   // Perform needed unlocking
 460   MonitorExitStub* stub = NULL;
 461   if (method()->is_synchronized()) {
 462     monitor_address(0, FrameMap::rax_opr);
 463     stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
 464     __ unlock_object(rdi, rsi, rax, *stub->entry());
 465     __ bind(*stub->continuation());
 466   }
 467 
 468   if (compilation()->env()->dtrace_method_probes()) {
 469 #ifdef _LP64
 470     __ mov(rdi, r15_thread);
 471     __ mov_metadata(rsi, method()->constant_encoding());
 472 #else
 473     __ get_thread(rax);
 474     __ movptr(Address(rsp, 0), rax);
 475     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 476 #endif
 477     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 478   }
 479 
 480   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 481     __ mov(rax, rbx);  // Restore the exception
 482   }
 483 
 484   // remove the activation and dispatch to the unwind handler
 485   __ remove_frame(initial_frame_size_in_bytes());

2865   switch (code) {
2866   case lir_static_call:
2867   case lir_optvirtual_call:
2868   case lir_dynamic_call:
2869     offset += NativeCall::displacement_offset;
2870     break;
2871   case lir_icvirtual_call:
2872     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2873     break;
2874   default: ShouldNotReachHere();
2875   }
2876   __ align(BytesPerWord, offset);
2877 }
2878 
2879 
2880 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2881   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2882          "must be aligned");
2883   __ call(AddressLiteral(op->addr(), rtype));
2884   add_call_info(code_offset(), op->info());
2885 }
2886 
2887 
2888 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2889   __ ic_call(op->addr());
2890   add_call_info(code_offset(), op->info());
2891   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2892          "must be aligned");
2893 }
2894 
2895 
2896 void LIR_Assembler::emit_static_call_stub() {
2897   address call_pc = __ pc();
2898   address stub = __ start_a_stub(call_stub_size());
2899   if (stub == NULL) {
2900     bailout("static call stub overflow");
2901     return;
2902   }
2903 
2904   int start = __ offset();
2905 
2906   // make sure that the displacement word of the call ends up word aligned
2907   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2908   __ relocate(static_stub_Relocation::spec(call_pc));
2909   __ mov_metadata(rbx, (Metadata*)NULL);
2910   // must be set to -1 at code generation time
2911   assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
2912   // On 64bit this will die since it will take a movq & jmp, must be only a jmp

3508 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
3509   Register obj = op->obj_opr()->as_register();  // may not be an oop
3510   Register hdr = op->hdr_opr()->as_register();
3511   Register lock = op->lock_opr()->as_register();
3512   if (!UseFastLocking) {
3513     __ jmp(*op->stub()->entry());
3514   } else if (op->code() == lir_lock) {
3515     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3516     // add debug info for NullPointerException only if one is possible
3517     int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
3518     if (op->info() != NULL) {
3519       add_debug_info_for_null_check(null_check_offset, op->info());
3520     }
3521     // done
3522   } else if (op->code() == lir_unlock) {
3523     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3524     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
3525   } else {
3526     Unimplemented();
3527   }
3528   __ bind(*op->stub()->continuation());
3529 }
3530 
3531 
3532 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
3533   ciMethod* method = op->profiled_method();
3534   int bci          = op->profiled_bci();
3535   ciMethod* callee = op->profiled_callee();
3536   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3537 
3538   // Update counter for all call types
3539   ciMethodData* md = method->method_data_or_null();
3540   assert(md != NULL, "Sanity");
3541   ciProfileData* data = md->bci_to_data(bci);
3542   assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
3543   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
3544   Register mdo  = op->mdo()->as_register();
3545   __ mov_metadata(mdo, md->constant_encoding());
3546   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
3547   // Perform additional virtual call profiling for invokevirtual and
3548   // invokeinterface bytecodes

3851   if (patch_code != lir_patch_none) {
3852     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
3853   }
3854 
3855   Register reg = dest->as_pointer_register();
3856   LIR_Address* addr = src->as_address_ptr();
3857   __ lea(reg, as_Address(addr));
3858 
3859   if (patch != NULL) {
3860     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
3861   }
3862 }
3863 
3864 
3865 
3866 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3867   assert(!tmp->is_valid(), "don't need temporary");
3868   __ call(RuntimeAddress(dest));
3869   if (info != NULL) {
3870     add_call_info_here(info);
3871   }
3872 }
3873 
3874 
3875 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3876   assert(type == T_LONG, "only for volatile long fields");
3877 
3878   if (info != NULL) {
3879     add_debug_info_for_null_check_here(info);
3880   }
3881 
3882   if (src->is_double_xmm()) {
3883     if (dest->is_double_cpu()) {
3884 #ifdef _LP64
3885       __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3886 #else
3887       __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
3888       __ psrlq(src->as_xmm_double_reg(), 32);
3889       __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
3890 #endif // _LP64
3891     } else if (dest->is_double_stack()) {

new/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp

 428   guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
 429   __ end_a_stub();
 430 
 431   return offset;
 432 }
 433 
 434 
 435 // Emit the code to remove the frame from the stack in the exception
 436 // unwind path.
 437 int LIR_Assembler::emit_unwind_handler() {
 438 #ifndef PRODUCT
 439   if (CommentedAssembly) {
 440     _masm->block_comment("Unwind handler");
 441   }
 442 #endif
 443 
 444   int offset = code_offset();
 445 
 446   // Fetch the exception from TLS and clear out exception related thread state
 447   Register thread = NOT_LP64(rsi) LP64_ONLY(r15_thread);
 448   NOT_LP64(__ get_thread(thread));
 449   __ movptr(rax, Address(thread, JavaThread::exception_oop_offset()));
 450   __ movptr(Address(thread, JavaThread::exception_oop_offset()), (intptr_t)NULL_WORD);
 451   __ movptr(Address(thread, JavaThread::exception_pc_offset()), (intptr_t)NULL_WORD);
 452 
 453   __ bind(_unwind_handler_entry);
 454   __ verify_not_null_oop(rax);
 455   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 456     __ mov(rbx, rax);  // Preserve the exception (rbx is always callee-saved)
 457   }
 458 
 459   // Perform needed unlocking
 460   MonitorExitStub* stub = NULL;
 461   if (method()->is_synchronized()) {
 462     monitor_address(0, FrameMap::rax_opr);
 463     stub = new MonitorExitStub(FrameMap::rax_opr, true, 0);
 464     __ unlock_object(rdi, rsi, rax, *stub->entry());
 465     __ bind(*stub->continuation());
 466     NOT_LP64(__ get_thread(thread);)
 467     __ dec_held_monitor_count(thread);
 468   }
 469 
 470   if (compilation()->env()->dtrace_method_probes()) {
 471 #ifdef _LP64
 472     __ mov(rdi, r15_thread);
 473     __ mov_metadata(rsi, method()->constant_encoding());
 474 #else
 475     __ get_thread(rax);
 476     __ movptr(Address(rsp, 0), rax);
 477     __ mov_metadata(Address(rsp, sizeof(void*)), method()->constant_encoding());
 478 #endif
 479     __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit)));
 480   }
 481 
 482   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 483     __ mov(rax, rbx);  // Restore the exception
 484   }
 485 
 486   // remove the activation and dispatch to the unwind handler
 487   __ remove_frame(initial_frame_size_in_bytes());
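
A note on the added dec_held_monitor_count: the exception-unwind path now also decrements the per-thread count of held monitors after releasing the lock, keeping the counter consistent when a synchronized method exits via an exception rather than a normal return. As a minimal illustrative sketch only (the helper body and the JavaThread accessor are assumptions, not taken from this webrev), the decrement could be as simple as:

    // Hypothetical sketch; the real MacroAssembler helper may differ.
    void MacroAssembler::dec_held_monitor_count(Register thread) {
      // Assumes JavaThread keeps a plain integer count of monitors held by
      // compiled frames; held_monitor_count_offset() is an assumed accessor.
      decrementl(Address(thread, JavaThread::held_monitor_count_offset()));
    }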

2867   switch (code) {
2868   case lir_static_call:
2869   case lir_optvirtual_call:
2870   case lir_dynamic_call:
2871     offset += NativeCall::displacement_offset;
2872     break;
2873   case lir_icvirtual_call:
2874     offset += NativeCall::displacement_offset + NativeMovConstReg::instruction_size;
2875     break;
2876   default: ShouldNotReachHere();
2877   }
2878   __ align(BytesPerWord, offset);
2879 }
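
For reference, the padding emitted by align_call exists so that the 4-byte displacement of the upcoming call lies within one aligned word and can later be patched atomically. On x86 the call opcode (0xE8) occupies a single byte, so NativeCall::displacement_offset is 1; for an inline-cache call the constant load that seeds the inline cache precedes the call, which is why NativeMovConstReg::instruction_size is added as well. A short worked example on a 64-bit build (BytesPerWord == 8), with an assumed starting offset:

    // Suppose __ offset() == 43 when align_call runs for a lir_static_call:
    //   target = 43 + 1 = 44, and 44 % 8 == 4, so align() pads with 4 nop bytes.
    //   The call opcode is then emitted at offset 47 and its rel32 displacement
    //   at offset 48, an 8-byte boundary, which is exactly what the assert in
    //   LIR_Assembler::call() below re-checks.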
2880 
2881 
2882 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2883   assert((__ offset() + NativeCall::displacement_offset) % BytesPerWord == 0,
2884          "must be aligned");
2885   __ call(AddressLiteral(op->addr(), rtype));
2886   add_call_info(code_offset(), op->info());
2887   __ oopmap_metadata(op->info());
2888   __ post_call_nop();
2889 }
2890 
2891 
2892 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2893   __ ic_call(op->addr());
2894   add_call_info(code_offset(), op->info());
2895   __ oopmap_metadata(op->info());
2896   assert((__ offset() - NativeCall::instruction_size + NativeCall::displacement_offset) % BytesPerWord == 0,
2897          "must be aligned");
2898   __ post_call_nop();
2899 }
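
The __ oopmap_metadata(op->info()) and __ post_call_nop() calls after each Java call site are new in this change: the former presumably records the call's oop map information as metadata, and the latter drops a marker immediately after the call so the return address of a compiled Java call site can be recognized later (for example when walking continuation stacks). A hypothetical sketch only; the relocation type and the choice of nop are assumptions, not the actual implementation:

    // Hypothetical sketch; the real helper may emit a different marker pattern.
    void MacroAssembler::post_call_nop() {
      relocate(post_call_nop_Relocation::spec());  // assumed relocation type
      nop();                                       // one-byte marker after the call
    }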
2900 
2901 
2902 void LIR_Assembler::emit_static_call_stub() {
2903   address call_pc = __ pc();
2904   address stub = __ start_a_stub(call_stub_size());
2905   if (stub == NULL) {
2906     bailout("static call stub overflow");
2907     return;
2908   }
2909 
2910   int start = __ offset();
2911 
2912   // make sure that the displacement word of the call ends up word aligned
2913   __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
2914   __ relocate(static_stub_Relocation::spec(call_pc));
2915   __ mov_metadata(rbx, (Metadata*)NULL);
2916   // must be set to -1 at code generation time
2917   assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
2918   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
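
The same alignment reasoning applies inside the stub: the align targets __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset so that the displacement of the jump following the movabs lands on a word boundary. A small worked example, assuming the usual x86_64 sizes (a 10-byte mov_metadata and a displacement offset of 1):

    // Suppose the stub body starts at offset 64: 64 + 10 + 1 = 75 and 75 % 8 == 3,
    // so 5 nop bytes are emitted. The movabs then spans offsets 69..78, the assert
    // sees (79 + 1) % 8 == 0, and the branch displacement begins at offset 80, an
    // aligned word that can be patched atomically.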

3514 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
3515   Register obj = op->obj_opr()->as_register();  // may not be an oop
3516   Register hdr = op->hdr_opr()->as_register();
3517   Register lock = op->lock_opr()->as_register();
3518   if (!UseFastLocking) {
3519     __ jmp(*op->stub()->entry());
3520   } else if (op->code() == lir_lock) {
3521     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3522     // add debug info for NullPointerException only if one is possible
3523     int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
3524     if (op->info() != NULL) {
3525       add_debug_info_for_null_check(null_check_offset, op->info());
3526     }
3527     // done
3528   } else if (op->code() == lir_unlock) {
3529     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
3530     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
3531   } else {
3532     Unimplemented();
3533   }
3534   if (op->code() == lir_lock) {
3535     // If deoptimization happens in Runtime1::monitorenter, the inc_held_monitor_count done after
3536     // returning from the slow path would be skipped. The solution is:
3537     // 1. Increase the count only on the fast path.
3538     // 2. Runtime1::monitorenter increases the count itself after locking.
3539 #ifndef _LP64
3540     Register thread = rsi;
3541     __ push(thread);
3542     __ get_thread(thread);
3543 #else
3544     Register thread = r15_thread;
3545 #endif
3546     __ inc_held_monitor_count(thread);
3547 #ifndef _LP64
3548     __ pop(thread);
3549 #endif
3550   }
3551   __ bind(*op->stub()->continuation());
3552   if (op->code() == lir_unlock) {
3553     // The slow-path unlock is a JRT_Leaf stub, so no deoptimization can happen here.
3554 #ifndef _LP64
3555     Register thread = rsi;
3556     __ push(thread);
3557     __ get_thread(thread);
3558 #else
3559     Register thread = r15_thread;
3560 #endif
3561     __ dec_held_monitor_count(thread);
3562 #ifndef _LP64
3563     __ pop(thread);
3564 #endif
3565   }
3566 }
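
To make the comments above concrete: inc_held_monitor_count is emitted before the continuation label, so it only runs on the fast path; when the slow path is taken, Runtime1::monitorenter is expected to bump the count itself once the lock is actually held, which is why a deoptimization inside that runtime call cannot lose the update. A hypothetical sketch of that runtime-side counterpart (names and structure are assumptions, not part of this webrev):

    // Hypothetical sketch of the assumed slow-path bookkeeping.
    JRT_ENTRY(void, Runtime1::monitorenter(JavaThread* current, oopDesc* obj, BasicObjectLock* lock))
      // Acquire the monitor first, then bump the per-thread count, so the
      // update is never skipped if this frame is later deoptimized.
      SharedRuntime::monitor_enter_helper(obj, lock->lock(), current);
      current->inc_held_monitor_count();   // assumed accessor name
    JRT_END

The decrement for lir_unlock, by contrast, sits after the continuation label and therefore covers both the fast and the slow path, which is safe because the slow-path unlock is a leaf call that cannot deoptimize.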
3567 
3568 
3569 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
3570   ciMethod* method = op->profiled_method();
3571   int bci          = op->profiled_bci();
3572   ciMethod* callee = op->profiled_callee();
3573   Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
3574 
3575   // Update counter for all call types
3576   ciMethodData* md = method->method_data_or_null();
3577   assert(md != NULL, "Sanity");
3578   ciProfileData* data = md->bci_to_data(bci);
3579   assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
3580   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
3581   Register mdo  = op->mdo()->as_register();
3582   __ mov_metadata(mdo, md->constant_encoding());
3583   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
3584   // Perform additional virtual call profiling for invokevirtual and
3585   // invokeinterface bytecodes
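
The hunk is cut off here. For context, counter_addr computed above addresses the per-call-site count slot in the MDO; a hedged sketch of the kind of increment such profiling code typically emits (not necessarily the elided lines of this file):

    // Bump the call counter recorded for this bci in the method data.
    __ addptr(counter_addr, DataLayout::counter_increment);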

3888   if (patch_code != lir_patch_none) {
3889     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
3890   }
3891 
3892   Register reg = dest->as_pointer_register();
3893   LIR_Address* addr = src->as_address_ptr();
3894   __ lea(reg, as_Address(addr));
3895 
3896   if (patch != NULL) {
3897     patching_epilog(patch, patch_code, addr->base()->as_register(), info);
3898   }
3899 }
3900 
3901 
3902 
3903 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
3904   assert(!tmp->is_valid(), "don't need temporary");
3905   __ call(RuntimeAddress(dest));
3906   if (info != NULL) {
3907     add_call_info_here(info);
3908     __ oopmap_metadata(info);
3909   }
3910   __ post_call_nop();
3911 }
3912 
3913 
3914 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
3915   assert(type == T_LONG, "only for volatile long fields");
3916 
3917   if (info != NULL) {
3918     add_debug_info_for_null_check_here(info);
3919   }
3920 
3921   if (src->is_double_xmm()) {
3922     if (dest->is_double_cpu()) {
3923 #ifdef _LP64
3924       __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
3925 #else
3926       __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
3927       __ psrlq(src->as_xmm_double_reg(), 32);
3928       __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
3929 #endif // _LP64
3930     } else if (dest->is_double_stack()) {
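
On the 32-bit branch above, the volatile long has already been moved through the XMM register as a single 64-bit access (which is what keeps the volatile access atomic without a lock); this code only splits it into the destination register pair: movdl copies the low 32 bits, psrlq shifts the XMM value right by 32 bits, and a second movdl extracts the high 32 bits. A concrete example:

    // xmm holds the volatile long 0x1122334455667788
    //   movdl  lo, xmm    -> lo  = 0x55667788
    //   psrlq  xmm, 32    -> xmm = 0x0000000011223344
    //   movdl  hi, xmm    -> hi  = 0x11223344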