src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
// values on its own
static int rax_offset_in_bytes(void) { return BytesPerInt * rax_off; }
static int rdx_offset_in_bytes(void) { return BytesPerInt * rdx_off; }
static int rbx_offset_in_bytes(void) { return BytesPerInt * rbx_off; }
+ static int r15_offset_in_bytes(void) { return BytesPerInt * r15_off; }
static int xmm0_offset_in_bytes(void) { return BytesPerInt * xmm0_off; }
static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
// During deoptimization only the result registers need to be restored,
// all the other values have already been extracted.
// rsp: pointer to the spilled rbp in the entry frame
//
// Kills:
// rbx
//
- void static continuation_enter_cleanup(MacroAssembler* masm) {
+ static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifdef ASSERT
Label L_good_sp;
__ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
__ jcc(Assembler::equal, L_good_sp);
__ stop("Incorrect rsp at continuation_enter_cleanup");
// --- Thawing path
__ bind(L_thaw);
+ ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
__ call(RuntimeAddress(StubRoutines::cont_thaw()));
ContinuationEntry::_return_pc_offset = __ pc() - start;
oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
__ post_call_nop();
__ leave();
__ ret(0);
}
+ void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
+ ::continuation_enter_cleanup(masm);
+ }
+
static void gen_special_dispatch(MacroAssembler* masm,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
__ jcc(Assembler::notEqual, slow_path_lock);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
}
+ __ jmp(lock_done);
+
__ bind(count_mon);
__ inc_held_monitor_count();
// Slow path will re-enter here
__ bind(lock_done);
if (LockingMode == LM_LEGACY) {
Label not_recur;
// Simple recursive lock?
__ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), NULL_WORD);
__ jcc(Assembler::notEqual, not_recur);
- __ dec_held_monitor_count();
__ jmpb(fast_done);
__ bind(not_recur);
}
// Must save rax if it is live now because cmpxchg must use it
__ jcc(Assembler::notEqual, slow_path_unlock);
__ dec_held_monitor_count();
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
- __ dec_held_monitor_count();
}
// slow path re-enters here
__ bind(unlock_done);
if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
__ mov(c_rarg0, obj_reg);
__ mov(c_rarg1, lock_reg);
__ mov(c_rarg2, r15_thread);
// Not a leaf but we have last_Java_frame setup as we want
+ // Force freeze slow path on ObjectMonitor::enter() for now, which will fail with freeze_pinned_native.
+ __ push_cont_fastpath();
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
+ __ pop_cont_fastpath();
restore_args(masm, total_c_args, c_arg, out_regs);
#ifdef ASSERT
{ Label L;
__ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
// return address.
uint SharedRuntime::in_preserve_stack_slots() {
return 4 + 2 * VerifyStackAtCalls;
}
+ VMReg SharedRuntime::thread_register() {
+ return r15_thread->as_VMReg();
+ }
+
//------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() {
// Allocate space for the code
ResourceMark rm;
// Setup code generation tools