< prev index next > src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
Print this page
// values on its own
// Byte offset of the rax spill slot: its slot index scaled by BytesPerInt.
static int rax_offset_in_bytes(void) { return rax_off * BytesPerInt; }
// Byte offset of the rdx spill slot: its slot index scaled by BytesPerInt.
static int rdx_offset_in_bytes(void) { return rdx_off * BytesPerInt; }
// Byte offset of the rbx spill slot: its slot index scaled by BytesPerInt.
static int rbx_offset_in_bytes(void) { return rbx_off * BytesPerInt; }
+ static int r15_offset_in_bytes(void) { return BytesPerInt * r15_off; }
// Byte offset of the xmm0 spill slot: its slot index scaled by BytesPerInt.
static int xmm0_offset_in_bytes(void) { return xmm0_off * BytesPerInt; }
// Byte offset of the saved return-address slot: slot index scaled by BytesPerInt.
static int return_offset_in_bytes(void) { return return_off * BytesPerInt; }
// During deoptimization only the result registers need to be restored,
// all the other values have already been extracted.
// rsp: pointer to the spilled rbp in the entry frame
//
// Kills:
// rbx
//
! void static continuation_enter_cleanup(MacroAssembler* masm) {
#ifdef ASSERT
Label L_good_sp;
__ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
__ jcc(Assembler::equal, L_good_sp);
__ stop("Incorrect rsp at continuation_enter_cleanup");
// rsp: pointer to the spilled rbp in the entry frame
//
// Kills:
// rbx
//
! static void continuation_enter_cleanup(MacroAssembler* masm) {
#ifdef ASSERT
Label L_good_sp;
__ cmpptr(rsp, Address(r15_thread, JavaThread::cont_entry_offset()));
__ jcc(Assembler::equal, L_good_sp);
__ stop("Incorrect rsp at continuation_enter_cleanup");
// --- Thawing path
__ bind(L_thaw);
+ ContinuationEntry::_thaw_call_pc_offset = __ pc() - start;
__ call(RuntimeAddress(StubRoutines::cont_thaw()));
ContinuationEntry::_return_pc_offset = __ pc() - start;
oop_maps->add_gc_map(__ pc() - start, map->deep_copy());
__ post_call_nop();
__ leave();
__ ret(0);
}
// Public SharedRuntime entry point for continuation-enter cleanup.
// Delegates to the file-local static continuation_enter_cleanup() defined
// above; the leading '::' is required so the call resolves to that
// file-scope helper rather than recursing into this member function.
void SharedRuntime::continuation_enter_cleanup(MacroAssembler* masm) {
  ::continuation_enter_cleanup(masm);
}
+
static void gen_special_dispatch(MacroAssembler* masm,
const methodHandle& method,
const BasicType* sig_bt,
const VMRegPair* regs) {
verify_oop_args(masm, method, sig_bt, regs);
__ jcc(Assembler::notEqual, slow_path_lock);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
}
+ __ jmp (lock_done);
+
__ bind(count_mon);
__ inc_held_monitor_count();
// Slow path will re-enter here
__ bind(lock_done);
// change thread state
__ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_Java);
__ bind(after_transition);
+ // Check preemption for Object.wait()
+ if (method->is_object_wait0()) {
+ Label not_preempted;
+ __ movptr(rscratch1, Address(r15_thread, JavaThread::preempt_alternate_return_offset()));
+ __ cmpptr(rscratch1, NULL_WORD);
+ __ jccb(Assembler::equal, not_preempted);
+ __ movptr(Address(r15_thread, JavaThread::preempt_alternate_return_offset()), NULL_WORD);
+ __ jmp(rscratch1);
+ __ bind(not_preempted);
+ }
+ int resume_wait_offset = ((intptr_t)__ pc()) - start;
+
Label reguard;
Label reguard_done;
__ cmpl(Address(r15_thread, JavaThread::stack_guard_state_offset()), StackOverflow::stack_guard_yellow_reserved_disabled);
__ jcc(Assembler::equal, reguard);
__ bind(reguard_done);
if (LockingMode == LM_LEGACY) {
Label not_recur;
// Simple recursive lock?
__ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), NULL_WORD);
__ jcc(Assembler::notEqual, not_recur);
- __ dec_held_monitor_count();
__ jmpb(fast_done);
__ bind(not_recur);
}
// Must save rax if it is live now because cmpxchg must use it
__ jcc(Assembler::notEqual, slow_path_unlock);
__ dec_held_monitor_count();
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
__ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
- __ dec_held_monitor_count();
}
// slow path re-enters here
__ bind(unlock_done);
if (ret_type != T_FLOAT && ret_type != T_DOUBLE && ret_type != T_VOID) {
__ mov(c_rarg0, obj_reg);
__ mov(c_rarg1, lock_reg);
__ mov(c_rarg2, r15_thread);
// Not a leaf but we have last_Java_frame setup as we want
+ // Force freeze slow path on ObjectMonitor::enter() for now which will fail with freeze_pinned_native.
+ __ push_cont_fastpath();
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
+ __ pop_cont_fastpath();
restore_args(masm, total_c_args, c_arg, out_regs);
#ifdef ASSERT
{ Label L;
__ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
stack_slots / VMRegImpl::slots_per_word,
(is_static ? in_ByteSize(klass_offset) : in_ByteSize(receiver_offset)),
in_ByteSize(lock_slot_offset*VMRegImpl::stack_slot_size),
oop_maps);
+ if (nm != nullptr && method->is_object_wait0()) {
+ SharedRuntime::set_native_frame_resume_entry(nm->code_begin() + resume_wait_offset);
+ }
+
return nm;
}
// this function returns the adjust size (in number of words) to a c2i adapter
// activation for use during deoptimization
// return address.
// Number of stack slots callers must preserve across a call.
// Four slots are always reserved; when the VerifyStackAtCalls develop flag
// is enabled, two additional slots are reserved (presumably for the stack
// verification canary — confirm against the flag's users).
uint SharedRuntime::in_preserve_stack_slots() {
  const uint fixed_slots  = 4;
  const uint verify_slots = 2 * VerifyStackAtCalls;
  return fixed_slots + verify_slots;
}
// Returns, as a VMReg, the register holding the current JavaThread*.
// Throughout this file r15 is used as the thread register (see the
// Address(r15_thread, JavaThread::...) accesses above).
VMReg SharedRuntime::thread_register() {
  return r15_thread->as_VMReg();
}
+
//------------------------------generate_deopt_blob----------------------------
void SharedRuntime::generate_deopt_blob() {
// Allocate space for the code
ResourceMark rm;
// Setup code generation tools
< prev index next >