
src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp


 424   int offset = code_offset();
 425 
 426   // Fetch the exception from TLS and clear out exception related thread state
 427   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
 428   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
 429   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
 430 
 431   __ bind(_unwind_handler_entry);
 432   __ verify_not_null_oop(r0);
 433   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 434     __ mov(r19, r0);  // Preserve the exception
 435   }
 436 
 437   // Perform needed unlocking
 438   MonitorExitStub* stub = NULL;
 439   if (method()->is_synchronized()) {
 440     monitor_address(0, FrameMap::r0_opr);
 441     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
 442     __ unlock_object(r5, r4, r0, *stub->entry());
 443     __ bind(*stub->continuation());

 444   }
 445 
 446   if (compilation()->env()->dtrace_method_probes()) {
 447     __ mov(c_rarg0, rthread);
 448     __ mov_metadata(c_rarg1, method()->constant_encoding());
 449     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 450   }
 451 
 452   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 453     __ mov(r0, r19);  // Restore the exception
 454   }
 455 
 456   // remove the activation and dispatch to the unwind handler
 457   __ block_comment("remove_frame and dispatch to the unwind handler");
 458   __ remove_frame(initial_frame_size_in_bytes());
 459   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 460 
 461   // Emit the slow path assembly
 462   if (stub != NULL) {
 463     stub->emit_code(this);

2039     __ mov(dst->as_register(), (uint64_t)-1L);
2040     __ br(Assembler::LT, done);
2041     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2042     __ bind(done);
2043   } else {
2044     ShouldNotReachHere();
2045   }
2046 }
2047 
2048 
2049 void LIR_Assembler::align_call(LIR_Code code) {  }
2050 
2051 
2052 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2053   address call = __ trampoline_call(Address(op->addr(), rtype));
2054   if (call == NULL) {
2055     bailout("trampoline stub overflow");
2056     return;
2057   }
2058   add_call_info(code_offset(), op->info());

2059 }
2060 
2061 
2062 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2063   address call = __ ic_call(op->addr());
2064   if (call == NULL) {
2065     bailout("trampoline stub overflow");
2066     return;
2067   }
2068   add_call_info(code_offset(), op->info());

2069 }
2070 
2071 void LIR_Assembler::emit_static_call_stub() {
2072   address call_pc = __ pc();
2073   address stub = __ start_a_stub(call_stub_size());
2074   if (stub == NULL) {
2075     bailout("static call stub overflow");
2076     return;
2077   }
2078 
2079   int start = __ offset();
2080 
2081   __ relocate(static_stub_Relocation::spec(call_pc));
2082   __ emit_static_call_stub();
2083 
2084   assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
2085         <= call_stub_size(), "stub too big");
2086   __ end_a_stub();
2087 }
2088 

2573 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2574   Register obj = op->obj_opr()->as_register();  // may not be an oop
2575   Register hdr = op->hdr_opr()->as_register();
2576   Register lock = op->lock_opr()->as_register();
2577   if (!UseFastLocking) {
2578     __ b(*op->stub()->entry());
2579   } else if (op->code() == lir_lock) {
2580     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2581     // add debug info for NullPointerException only if one is possible
2582     int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
2583     if (op->info() != NULL) {
2584       add_debug_info_for_null_check(null_check_offset, op->info());
2585     }
2586     // done
2587   } else if (op->code() == lir_unlock) {
2588     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2589     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2590   } else {
2591     Unimplemented();
2592   }







2593   __ bind(*op->stub()->continuation());




2594 }
2595 
2596 
2597 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2598   ciMethod* method = op->profiled_method();
2599   int bci          = op->profiled_bci();
2600   ciMethod* callee = op->profiled_callee();
2601 
2602   // Update counter for all call types
2603   ciMethodData* md = method->method_data_or_null();
2604   assert(md != NULL, "Sanity");
2605   ciProfileData* data = md->bci_to_data(bci);
2606   assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
2607   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2608   Register mdo  = op->mdo()->as_register();
2609   __ mov_metadata(mdo, md->constant_encoding());
2610   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2611   // Perform additional virtual call profiling for invokevirtual and
2612   // invokeinterface bytecodes
2613   if (op->should_profile_receiver_type()) {

2885   }
2886 
2887   __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
2888 }
2889 
2890 
2891 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2892   assert(!tmp->is_valid(), "don't need temporary");
2893 
2894   CodeBlob *cb = CodeCache::find_blob(dest);
2895   if (cb) {
2896     __ far_call(RuntimeAddress(dest));
2897   } else {
2898     __ mov(rscratch1, RuntimeAddress(dest));
2899     __ blr(rscratch1);
2900   }
2901 
2902   if (info != NULL) {
2903     add_call_info_here(info);
2904   }

2905 }
2906 
2907 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2908   if (dest->is_address() || src->is_address()) {
2909     move_op(src, dest, type, lir_patch_none, info,
2910             /*pop_fpu_stack*/false, /*wide*/false);
2911   } else {
2912     ShouldNotReachHere();
2913   }
2914 }
2915 
2916 #ifdef ASSERT
2917 // emit run-time assertion
2918 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
2919   assert(op->code() == lir_assert, "must be");
2920 
2921   if (op->in_opr1()->is_valid()) {
2922     assert(op->in_opr2()->is_valid(), "both operands must be valid");
2923     comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
2924   } else {

 424   int offset = code_offset();
 425 
 426   // Fetch the exception from TLS and clear out exception related thread state
 427   __ ldr(r0, Address(rthread, JavaThread::exception_oop_offset()));
 428   __ str(zr, Address(rthread, JavaThread::exception_oop_offset()));
 429   __ str(zr, Address(rthread, JavaThread::exception_pc_offset()));
 430 
 431   __ bind(_unwind_handler_entry);
 432   __ verify_not_null_oop(r0);
 433   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 434     __ mov(r19, r0);  // Preserve the exception
 435   }
 436 
 437   // Perform needed unlocking
 438   MonitorExitStub* stub = NULL;
 439   if (method()->is_synchronized()) {
 440     monitor_address(0, FrameMap::r0_opr);
 441     stub = new MonitorExitStub(FrameMap::r0_opr, true, 0);
 442     __ unlock_object(r5, r4, r0, *stub->entry());
 443     __ bind(*stub->continuation());
 444     __ dec_held_monitor_count(rthread);
 445   }
 446 
 447   if (compilation()->env()->dtrace_method_probes()) {
 448     __ mov(c_rarg0, rthread);
 449     __ mov_metadata(c_rarg1, method()->constant_encoding());
 450     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), c_rarg0, c_rarg1);
 451   }
 452 
 453   if (method()->is_synchronized() || compilation()->env()->dtrace_method_probes()) {
 454     __ mov(r0, r19);  // Restore the exception
 455   }
 456 
 457   // remove the activation and dispatch to the unwind handler
 458   __ block_comment("remove_frame and dispatch to the unwind handler");
 459   __ remove_frame(initial_frame_size_in_bytes());
 460   __ far_jump(RuntimeAddress(Runtime1::entry_for(Runtime1::unwind_exception_id)));
 461 
 462   // Emit the slow path assembly
 463   if (stub != NULL) {
 464     stub->emit_code(this);

2040     __ mov(dst->as_register(), (uint64_t)-1L);
2041     __ br(Assembler::LT, done);
2042     __ csinc(dst->as_register(), zr, zr, Assembler::EQ);
2043     __ bind(done);
2044   } else {
2045     ShouldNotReachHere();
2046   }
2047 }
2048 
2049 
2050 void LIR_Assembler::align_call(LIR_Code code) {  }
2051 
2052 
2053 void LIR_Assembler::call(LIR_OpJavaCall* op, relocInfo::relocType rtype) {
2054   address call = __ trampoline_call(Address(op->addr(), rtype));
2055   if (call == NULL) {
2056     bailout("trampoline stub overflow");
2057     return;
2058   }
2059   add_call_info(code_offset(), op->info());
2060   __ post_call_nop();
2061 }
2062 
2063 
2064 void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
2065   address call = __ ic_call(op->addr());
2066   if (call == NULL) {
2067     bailout("trampoline stub overflow");
2068     return;
2069   }
2070   add_call_info(code_offset(), op->info());
2071   __ post_call_nop();
2072 }
2073 
2074 void LIR_Assembler::emit_static_call_stub() {
2075   address call_pc = __ pc();
2076   address stub = __ start_a_stub(call_stub_size());
2077   if (stub == NULL) {
2078     bailout("static call stub overflow");
2079     return;
2080   }
2081 
2082   int start = __ offset();
2083 
2084   __ relocate(static_stub_Relocation::spec(call_pc));
2085   __ emit_static_call_stub();
2086 
2087   assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
2088         <= call_stub_size(), "stub too big");
2089   __ end_a_stub();
2090 }
2091 

2576 void LIR_Assembler::emit_lock(LIR_OpLock* op) {
2577   Register obj = op->obj_opr()->as_register();  // may not be an oop
2578   Register hdr = op->hdr_opr()->as_register();
2579   Register lock = op->lock_opr()->as_register();
2580   if (!UseFastLocking) {
2581     __ b(*op->stub()->entry());
2582   } else if (op->code() == lir_lock) {
2583     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2584     // add debug info for NullPointerException only if one is possible
2585     int null_check_offset = __ lock_object(hdr, obj, lock, *op->stub()->entry());
2586     if (op->info() != NULL) {
2587       add_debug_info_for_null_check(null_check_offset, op->info());
2588     }
2589     // done
2590   } else if (op->code() == lir_unlock) {
2591     assert(BasicLock::displaced_header_offset_in_bytes() == 0, "lock_reg must point to the displaced header");
2592     __ unlock_object(hdr, obj, lock, *op->stub()->entry());
2593   } else {
2594     Unimplemented();
2595   }
2596   if (op->code() == lir_lock) {
2597     // If deoptimization happens in Runtime1::monitorenter, inc_held_monitor_count after backing from slowpath
2598     // will be skipped. Solution is:
2599     // 1. Increase only in fastpath
2600     // 2. Runtime1::monitorenter increase count after locking
2601     __ inc_held_monitor_count(rthread);
2602   }
2603   __ bind(*op->stub()->continuation());
2604   if (op->code() == lir_unlock) {
2605     // unlock in slowpath is JRT_Leaf stub, no deoptimization can happen
2606     __ dec_held_monitor_count(rthread);
2607   }
2608 }
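
The comment in emit_lock above explains why the held-monitor counter is bumped only on the fast path: if deoptimization happens inside Runtime1::monitorenter, an increment placed after the slow-path call would be skipped, so the slow path has to do its own increment once it has acquired the lock, while unlock can decrement unconditionally because its slow path is a JRT_Leaf stub that cannot deoptimize. A minimal standalone sketch of that balancing rule (plain C++, not HotSpot code; ThreadState, Monitor, monitorenter_slow and the other names are hypothetical):

// Standalone model of the "increment only on the fast path,
// slow path increments for itself" rule described above.
#include <atomic>
#include <cassert>
#include <mutex>

struct ThreadState {
  int held_monitor_count = 0;   // analogue of the per-thread held monitor count
};

struct Monitor {
  std::mutex m;
  bool contended = false;       // set to true to force the slow path
};

// Slow path: acquires the lock and bumps the count itself, so a caller
// whose post-call code is skipped (e.g. by deoptimization) stays balanced.
void monitorenter_slow(ThreadState& t, Monitor& mon) {
  mon.m.lock();
  t.held_monitor_count++;
}

// Fast path: increments only when the inline lock attempt succeeds.
void lock(ThreadState& t, Monitor& mon) {
  if (!mon.contended && mon.m.try_lock()) {
    t.held_monitor_count++;     // increment only in the fast path
  } else {
    monitorenter_slow(t, mon);  // slow path does its own increment
  }
}

void unlock(ThreadState& t, Monitor& mon) {
  mon.m.unlock();
  t.held_monitor_count--;       // single decrement covers both paths:
                                // the unlock slow path cannot deoptimize
}

int main() {
  ThreadState t;
  Monitor mon;
  lock(t, mon);
  assert(t.held_monitor_count == 1);
  unlock(t, mon);
  assert(t.held_monitor_count == 0);
  return 0;
}
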
2609 
2610 
2611 void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
2612   ciMethod* method = op->profiled_method();
2613   int bci          = op->profiled_bci();
2614   ciMethod* callee = op->profiled_callee();
2615 
2616   // Update counter for all call types
2617   ciMethodData* md = method->method_data_or_null();
2618   assert(md != NULL, "Sanity");
2619   ciProfileData* data = md->bci_to_data(bci);
2620   assert(data != NULL && data->is_CounterData(), "need CounterData for calls");
2621   assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
2622   Register mdo  = op->mdo()->as_register();
2623   __ mov_metadata(mdo, md->constant_encoding());
2624   Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
2625   // Perform additional virtual call profiling for invokevirtual and
2626   // invokeinterface bytecodes
2627   if (op->should_profile_receiver_type()) {

2899   }
2900 
2901   __ lea(dest->as_register_lo(), as_Address(addr->as_address_ptr()));
2902 }
2903 
2904 
2905 void LIR_Assembler::rt_call(LIR_Opr result, address dest, const LIR_OprList* args, LIR_Opr tmp, CodeEmitInfo* info) {
2906   assert(!tmp->is_valid(), "don't need temporary");
2907 
2908   CodeBlob *cb = CodeCache::find_blob(dest);
2909   if (cb) {
2910     __ far_call(RuntimeAddress(dest));
2911   } else {
2912     __ mov(rscratch1, RuntimeAddress(dest));
2913     __ blr(rscratch1);
2914   }
2915 
2916   if (info != NULL) {
2917     add_call_info_here(info);
2918   }
2919   __ post_call_nop();
2920 }
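
One way to read the split in rt_call above: a destination inside the code cache can be reached with a pc-relative far_call (possibly via a trampoline), while an arbitrary runtime address may be out of branch range, so its 64-bit value is loaded into rscratch1 and called through blr. A rough standalone sketch of the range check involved (the +/-128 MB BL range is architectural; reachable_by_bl and the sample addresses are illustrative, not HotSpot code):

#include <cstdint>
#include <cstdio>

// AArch64 BL/B encode a signed 26-bit word offset, i.e. +/-128 MB
// from the call site.  Everything here is illustrative only.
constexpr int64_t kBranchRange = 128LL * 1024 * 1024;

bool reachable_by_bl(uint64_t call_site, uint64_t target) {
  int64_t offset = static_cast<int64_t>(target - call_site);
  return offset >= -kBranchRange && offset < kBranchRange;
}

int main() {
  uint64_t pc = 0x0000700000000000ULL;
  // A nearby target fits in a single pc-relative BL.
  printf("near target reachable: %d\n", reachable_by_bl(pc, pc + 0x100000));    // 1
  // A target 512 MB away does not; it needs the mov + blr sequence.
  printf("far  target reachable: %d\n", reachable_by_bl(pc, pc + 0x20000000));  // 0
  return 0;
}
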
2921 
2922 void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info) {
2923   if (dest->is_address() || src->is_address()) {
2924     move_op(src, dest, type, lir_patch_none, info,
2925             /*pop_fpu_stack*/false, /*wide*/false);
2926   } else {
2927     ShouldNotReachHere();
2928   }
2929 }
2930 
2931 #ifdef ASSERT
2932 // emit run-time assertion
2933 void LIR_Assembler::emit_assert(LIR_OpAssert* op) {
2934   assert(op->code() == lir_assert, "must be");
2935 
2936   if (op->in_opr1()->is_valid()) {
2937     assert(op->in_opr2()->is_valid(), "both operands must be valid");
2938     comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
2939   } else {