src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp
/*
- * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
__ bind(entry());
__ call(RuntimeAddress(StubRoutines::x86::method_entry_barrier()));
__ jmp(continuation(), false /* maybe_short */);
}
- #ifdef _LP64
- int C2HandleAnonOMOwnerStub::max_size() const {
- // Max size of stub has been determined by testing with 0, in which case
- // C2CodeStubList::emit() will throw an assertion and report the actual size that
- // is needed.
- return DEBUG_ONLY(36) NOT_DEBUG(21);
+ int C2FastUnlockLightweightStub::max_size() const {
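+ // Conservative upper bound; C2CodeStubList::emit() asserts that the actual
+ // emitted size fits, so an undersized value is caught at build time.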
+ return 128;
}
- void C2HandleAnonOMOwnerStub::emit(C2_MacroAssembler& masm) {
- __ bind(entry());
- Register mon = monitor();
- Register t = tmp();
- __ movptr(Address(mon, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), r15_thread);
- __ subl(Address(r15_thread, JavaThread::lock_stack_top_offset()), oopSize);
+ void C2FastUnlockLightweightStub::emit(C2_MacroAssembler& masm) {
+ assert(_t == rax, "must be");
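+ // _t must be rax: cmpxchgptr below implicitly compares against rax, and
+ // rax is cleared on the unlocked exit to set ZF for the caller.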
+
+ Label restore_held_monitor_count_and_slow_path;
+
+ { // Restore lock-stack and handle the unlock in runtime.
+
+ __ bind(_push_and_slow_path);
#ifdef ASSERT
- __ movl(t, Address(r15_thread, JavaThread::lock_stack_top_offset()));
- __ movptr(Address(r15_thread, t), 0);
+ // The lock-stack slot was only cleared in debug builds, so only restore obj there.
+ __ movl(_t, Address(_thread, JavaThread::lock_stack_top_offset()));
+ __ movptr(Address(_thread, _t), _obj);
+ #endif
+ __ addl(Address(_thread, JavaThread::lock_stack_top_offset()), oopSize);
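+ // Fall through to restore the held monitor count and take the slow path.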
+ }
+
+ { // Restore held monitor count and slow path.
+
+ __ bind(restore_held_monitor_count_and_slow_path);
+ // Restore held monitor count.
+ __ increment(Address(_thread, JavaThread::held_monitor_count_offset()));
+ // The increment always results in ZF = 0 (no overflow).
+ __ jmp(slow_path_continuation());
+ }
+
+ { // Handle monitor medium path.
+
+ __ bind(_check_successor);
+
+ Label fix_zf_and_unlocked;
+ const Register monitor = _mark;
+
+ #ifndef _LP64
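+ // The successor hand-off protocol below is 64-bit only; on 32-bit always
+ // take the slow path.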
+ __ jmpb(restore_held_monitor_count_and_slow_path);
+ #else // _LP64
+ // Successor null check.
+ __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
+ __ jccb(Assembler::equal, restore_held_monitor_count_and_slow_path);
+
+ // Release lock.
+ __ movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
+
+ // Fence.
+ // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
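+ // This orders the owner release above before the successor re-read below.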
+ __ lock(); __ addl(Address(rsp, 0), 0);
+
+ // Recheck successor.
+ __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
+ // Observed a successor after the release fence -> we have handed off the monitor.
+ __ jccb(Assembler::notEqual, fix_zf_and_unlocked);
+
+ // Try to relock; if it fails, the monitor has been handed over.
+ // TODO: Caveat, this may fail due to deflation, which does not
+ //       handle the monitor handoff. It currently only works due
+ //       to the responsible thread.
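+ // cmpxchgptr compares against rax, so clear it first (NULL = unowned).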
+ __ xorptr(rax, rax);
+ __ lock(); __ cmpxchgptr(_thread, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+ __ jccb(Assembler::equal, restore_held_monitor_count_and_slow_path);
#endif
+
+ __ bind(fix_zf_and_unlocked);
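+ // xorl clears rax and sets ZF = 1, signaling a successful unlock to the caller.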
+ __ xorl(rax, rax);
+ __ jmp(unlocked_continuation());
+ }
+ }
+
+ #ifdef _LP64
+ int C2LoadNKlassStub::max_size() const {
+ return 10;
+ }
+
+ void C2LoadNKlassStub::emit(C2_MacroAssembler& masm) {
+ __ bind(entry());
+ Register d = dst();
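+ // d holds the mark word, a tagged ObjectMonitor*; reload the displaced
+ // header (which carries the narrow klass) from the monitor.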
+ __ movq(d, Address(d, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
__ jmp(continuation());
}
#endif
#undef __