1 /*
  2  * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "opto/c2_MacroAssembler.hpp"
 27 #include "opto/c2_CodeStubs.hpp"
 28 #include "runtime/objectMonitor.hpp"
 29 #include "runtime/sharedRuntime.hpp"
 30 #include "runtime/stubRoutines.hpp"
 31 
 32 #define __ masm.
 33 
 34 int C2SafepointPollStub::max_size() const {
 35   return 33;
 36 }
 37 
// Out-of-line slow path of a safepoint poll: record the pc of the polling
// instruction in JavaThread::saved_exception_pc, then jump to the shared
// polling-page return handler stub.
void C2SafepointPollStub::emit(C2_MacroAssembler& masm) {
  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

  RuntimeAddress callback_addr(stub);

  __ bind(entry());
  // pc() - offset() is the start of the current code buffer; adding
  // _safepoint_offset yields the address of the poll instruction itself.
  InternalAddress safepoint_pc(masm.pc() - masm.offset() + _safepoint_offset);
#ifdef _LP64
  __ lea(rscratch1, safepoint_pc);
  __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);
#else
  // 32-bit: no dedicated thread register or scratch register, so borrow
  // rcx/rdx and preserve their values around the store.
  const Register tmp1 = rcx;
  const Register tmp2 = rdx;
  __ push(tmp1);
  __ push(tmp2);

  __ lea(tmp1, safepoint_pc);
  __ get_thread(tmp2);
  __ movptr(Address(tmp2, JavaThread::saved_exception_pc_offset()), tmp1);

  __ pop(tmp2);
  __ pop(tmp1);
#endif
  __ jump(callback_addr);
}
 65 
 66 int C2EntryBarrierStub::max_size() const {
 67   return 10;
 68 }
 69 
// Slow path of the nmethod entry barrier: call the shared method-entry
// barrier stub, then resume execution after the barrier.
void C2EntryBarrierStub::emit(C2_MacroAssembler& masm) {
  __ bind(entry());
  __ call(RuntimeAddress(StubRoutines::method_entry_barrier()));
  // maybe_short = false: do not use the short (rel8) jmp encoding.
  __ jmp(continuation(), false /* maybe_short */);
}
 75 
 76 #ifdef _LP64
 77 int C2HandleAnonOMOwnerStub::max_size() const {
 78   // Max size of stub has been determined by testing with 0, in which case
 79   // C2CodeStubList::emit() will throw an assertion and report the actual size that
 80   // is needed.
 81   return DEBUG_ONLY(40) NOT_DEBUG(25);
 82 }
 83 
// Take ownership of a monitor whose owner is anonymous: store this thread's
// lock id into the monitor's owner field, then undo the stack-lock
// bookkeeping for the object.
void C2HandleAnonOMOwnerStub::emit(C2_MacroAssembler& masm) {
  __ bind(entry());
  Register mon = monitor();
  Register t = tmp();
  // owner = current thread's lock id.
  __ movptr(t, Address(r15_thread, JavaThread::lock_id_offset()));
  __ movptr(Address(mon, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), t);
  if (LockingMode == LM_LIGHTWEIGHT) {
    // Pop the object off the thread's lock stack.
    __ subl(Address(r15_thread, JavaThread::lock_stack_top_offset()), oopSize);
#ifdef ASSERT
    // Debug only: clear the vacated lock-stack slot.
    __ movl(t, Address(r15_thread, JavaThread::lock_stack_top_offset()));
    __ movptr(Address(r15_thread, t), 0);
#endif
  } else {
    // Legacy stack locking: clear the monitor's stack locker and drop the
    // thread's held-monitor count.
    __ movptr(Address(mon, OM_OFFSET_NO_MONITOR_VALUE_TAG(stack_locker)), NULL_WORD);
    __ decrementq(Address(r15_thread, JavaThread::held_monitor_count_offset()));
  }
  __ jmp(continuation());
}
102 #endif
103 
104 int C2FastUnlockLightweightStub::max_size() const {
105   return 128;
106 }
107 
// Slow paths for the lightweight fast-unlock sequence. On exit, ZF
// communicates the outcome to the inline fast path: ZF=1 means unlocked,
// ZF=0 means the runtime must finish the unlock.
void C2FastUnlockLightweightStub::emit(C2_MacroAssembler& masm) {
  assert(_t1 == rax, "must be");

  Label slow_path;

  { // Restore lock-stack and handle the unlock in runtime.

    __ bind(_push_and_slow_path);
#ifdef ASSERT
    // The obj was only cleared in debug.
    __ movl(_t1, Address(_thread, JavaThread::lock_stack_top_offset()));
    __ movptr(Address(_thread, _t1), _obj);
#endif
    // Push the object back onto the lock stack, then fall through to the
    // slow path below.
    __ addl(Address(_thread, JavaThread::lock_stack_top_offset()), oopSize);
  }

  { // Handle the unlock in runtime

    __ bind(slow_path);
    // set ZF=0 to indicate failure
    __ orl(_t1, 1);
    __ jmp(slow_path_continuation());
  }

  { // Handle monitor medium path.

    __ bind(_check_successor);

    Label fix_zf_and_unlocked;
    const Register monitor = _mark;

#ifndef _LP64
    // 32-bit: no medium path; always defer to the runtime.
    __ jmpb(slow_path);
#else // _LP64
    // successor null check.
    __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
    __ jccb(Assembler::equal, slow_path);

    // Release lock.
    __ movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);

    // Fence.
    // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
    __ lock(); __ addl(Address(rsp, 0), 0);

    // Recheck successor.
    __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
    // Observed a successor after the release -> fence we have handed off the monitor
    __ jccb(Assembler::notEqual, fix_zf_and_unlocked);

    // Try to relock, if it fails the monitor has been handed over
    // TODO: Caveat, this may fail due to deflation, which does
    //       not handle the monitor handoff. Currently only works
    //       due to the responsible thread.
    // cmpxchg expects the compare value in rax (== _t1, asserted above).
    __ xorptr(rax, rax);
    __ movptr(_t2, Address(_thread, JavaThread::lock_id_offset()));
    __ lock(); __ cmpxchgptr(_t2, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    // cmpxchg sets ZF on success, so equal here means we re-acquired the
    // monitor and must unlock via the runtime.
    __ jccb  (Assembler::equal, slow_path);
#endif

    __ bind(fix_zf_and_unlocked);
    // xor sets ZF=1 -> report "unlocked" to the fast path.
    __ xorl(rax, rax);
    __ jmp(unlocked_continuation());
  }
}
173 
174 #undef __