1 /*
  2  * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "opto/c2_MacroAssembler.hpp"
 27 #include "opto/c2_CodeStubs.hpp"
 28 #include "runtime/objectMonitor.hpp"
 29 #include "runtime/sharedRuntime.hpp"
 30 #include "runtime/stubRoutines.hpp"
 31 
 32 #define __ masm.
 33 
 34 int C2SafepointPollStub::max_size() const {
 35   return 33;
 36 }
 37 
// Emit the out-of-line slow path taken when a safepoint poll triggers.
// Records the pc of the poll site in JavaThread::saved_exception_pc and
// then jumps to the shared polling-page return handler stub.
void C2SafepointPollStub::emit(C2_MacroAssembler& masm) {
  assert(SharedRuntime::polling_page_return_handler_blob() != nullptr,
         "polling page return stub not created yet");
  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();

  RuntimeAddress callback_addr(stub);

  __ bind(entry());
  // Reconstruct the address of the original poll instruction:
  // pc() - offset() is the start of the current code buffer, and
  // _safepoint_offset is the poll site's offset within it.
  InternalAddress safepoint_pc(masm.pc() - masm.offset() + _safepoint_offset);
#ifdef _LP64
  // 64-bit: r15 holds the current JavaThread, rscratch1 is free to clobber.
  __ lea(rscratch1, safepoint_pc);
  __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);
#else
  // 32-bit: no dedicated thread register and no scratch register, so
  // preserve rcx/rdx around the thread lookup and the pc store.
  const Register tmp1 = rcx;
  const Register tmp2 = rdx;
  __ push(tmp1);
  __ push(tmp2);

  __ lea(tmp1, safepoint_pc);
  __ get_thread(tmp2);
  __ movptr(Address(tmp2, JavaThread::saved_exception_pc_offset()), tmp1);

  __ pop(tmp2);
  __ pop(tmp1);
#endif
  // Tail-jump (not call) into the shared return handler.
  __ jump(callback_addr);
}
 65 
 66 int C2EntryBarrierStub::max_size() const {
 67   return 10;
 68 }
 69 
// Emit the out-of-line slow path of the nmethod entry barrier: call the
// shared method-entry-barrier runtime stub, then resume at the
// continuation label in the method prologue.
void C2EntryBarrierStub::emit(C2_MacroAssembler& masm) {
  __ bind(entry());
  __ call(RuntimeAddress(StubRoutines::method_entry_barrier()));
  // maybe_short = false: force the long jmp encoding so the stub size
  // stays within the bound reported by max_size().
  __ jmp(continuation(), false /* maybe_short */);
}
 75 
 76 int C2FastUnlockLightweightStub::max_size() const {
 77   return 128;
 78 }
 79 
// Emit the out-of-line paths for the lightweight fast-unlock sequence.
// Exit convention (shared with the inline fast path): ZF == 1 means the
// lock was released (jump to unlocked_continuation()); ZF == 0 means the
// runtime must finish the unlock (jump to slow_path_continuation()).
void C2FastUnlockLightweightStub::emit(C2_MacroAssembler& masm) {
  // _t must be rax: rax is the implicit accumulator of cmpxchg below and
  // the register we zero to establish the ZF == 1 "unlocked" convention.
  assert(_t == rax, "must be");

  Label restore_held_monitor_count_and_slow_path;

  { // Restore lock-stack and handle the unlock in runtime.

    __ bind(_push_and_slow_path);
#ifdef ASSERT
    // The obj was only cleared in debug, so only debug builds need to
    // write it back to the lock-stack slot before re-growing the stack.
    __ movl(_t, Address(_thread, JavaThread::lock_stack_top_offset()));
    __ movptr(Address(_thread, _t), _obj);
#endif
    // Undo the pop: bump the lock-stack top back up by one slot.
    __ addl(Address(_thread, JavaThread::lock_stack_top_offset()), oopSize);
  }

  { // Restore held monitor count and slow path.

    __ bind(restore_held_monitor_count_and_slow_path);
    // Restore held monitor count.
    __ increment(Address(_thread, JavaThread::held_monitor_count_offset()));
    // increment will always result in ZF = 0 (no overflows), which is
    // exactly the "take the slow path" flag state the caller expects.
    __ jmp(slow_path_continuation());
  }

  { // Handle monitor medium path.

    __ bind(_check_successor);

    Label fix_zf_and_unlocked;
    // _mark holds the ObjectMonitor* at this point; alias it for clarity.
    const Register monitor = _mark;

#ifndef _LP64
    // 32-bit: no medium path; defer straight to the runtime.
    __ jmpb(restore_held_monitor_count_and_slow_path);
#else // _LP64
    // successor null check.
    __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
    __ jccb(Assembler::equal, restore_held_monitor_count_and_slow_path);

    // Release lock.
    __ movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);

    // Fence.
    // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
    __ lock(); __ addl(Address(rsp, 0), 0);

    // Recheck successor.
    __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
    // Observed a successor after the release -> fence we have handed off the monitor
    __ jccb(Assembler::notEqual, fix_zf_and_unlocked);

    // Try to relock, if it fails the monitor has been handed over
    // TODO: Caveat, this may fail due to deflation, which does
    //       not handle the monitor handoff. Currently only works
    //       due to the responsible thread.
    // cmpxchg compares owner against rax (zeroed above); on success the
    // owner field now holds _thread again and ZF = 1, but we must still
    // take the slow path to complete the unlock properly.
    __ xorptr(rax, rax);
    __ lock(); __ cmpxchgptr(_thread, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
    __ jccb  (Assembler::equal, restore_held_monitor_count_and_slow_path);
#endif

    __ bind(fix_zf_and_unlocked);
    // xor of rax with itself yields 0 and sets ZF = 1: signal "unlocked".
    __ xorl(rax, rax);
    __ jmp(unlocked_continuation());
  }
}
145 
146 #ifdef _LP64
147 int C2LoadNKlassStub::max_size() const {
148   return 10;
149 }
150 
// Out-of-line path of the narrow-klass load: dst is expected to hold a
// (tagged) ObjectMonitor pointer here, so load the displaced header word
// from the monitor instead of reading the object's mark word directly.
// NOTE(review): the exact precondition on dst (inflated-lock case) is
// established by the inline fast path emitted elsewhere — confirm there.
void C2LoadNKlassStub::emit(C2_MacroAssembler& masm) {
  __ bind(entry());
  Register d = dst();
  // In-place replace: d = monitor->header (OM_OFFSET_NO_MONITOR_VALUE_TAG
  // compensates for the monitor tag bits in the pointer).
  __ movq(d, Address(d, OM_OFFSET_NO_MONITOR_VALUE_TAG(header)));
  __ jmp(continuation());
}
157 #endif
158 
159 #undef __