/*
 * Copyright (c) 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_LOCKSTACK_INLINE_HPP
#define SHARE_RUNTIME_LOCKSTACK_INLINE_HPP

#include "runtime/lockStack.hpp"

#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"

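// Note: _top is a byte offset rather than an element index. Judging by the
// arithmetic below, entries are addressed relative to the owning JavaThread
// (lock_stack_base_offset), which lets emitted code locate the current entry
// without extra scaling; to_index() converts the offset back into an index
// into _base.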
inline int LockStack::to_index(uint32_t offset) {
  assert(is_aligned(offset, oopSize), "Bad alignment: %u", offset);
  assert((offset <= end_offset()), "lockstack overflow: offset %u end_offset %u", offset, end_offset());
  assert((offset >= start_offset()), "lockstack underflow: offset %u start_offset %u", offset, start_offset());
  return (offset - lock_stack_base_offset) / oopSize;
}

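// The LockStack is embedded in its JavaThread at a fixed offset, so the
// owning thread can be recovered by subtracting that offset from `this`.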
inline JavaThread* LockStack::get_thread() const {
  char* addr = reinterpret_cast<char*>(const_cast<LockStack*>(this));
  return reinterpret_cast<JavaThread*>(addr - lock_stack_offset);
}

inline bool LockStack::is_full() const {
  return to_index(_top) == CAPACITY;
}

inline bool LockStack::can_push(int n) const {
  return (CAPACITY - to_index(_top)) >= n;
}

inline bool LockStack::is_owning_thread() const {
  Thread* current = Thread::current();
  if (current->is_Java_thread()) {
    JavaThread* thread = JavaThread::cast(current);
    bool is_owning = &thread->lock_stack() == this;
    assert(is_owning == (get_thread() == thread), "is_owning sanity");
    return is_owning;
  }
  return false;
}

inline void LockStack::push(oop o) {
  verify("pre-push");
  assert(oopDesc::is_oop(o), "must be");
  assert(!contains(o), "entries must be unique");
  assert(!is_full(), "must have room");
  assert(_base[to_index(_top)] == nullptr, "expect zapped entry");
  _base[to_index(_top)] = o;
  _top += oopSize;
  verify("post-push");
}

inline oop LockStack::bottom() const {
  assert(to_index(_top) > 0, "must contain an oop");
  return _base[0];
}

inline oop LockStack::top() {
  assert(to_index(_top) > 0, "may only call with at least one element in the stack");
  return _base[to_index(_top) - 1];
}

inline bool LockStack::is_empty() const {
  return to_index(_top) == 0;
}

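// Illustration (stack shown bottom to top; A and B are distinct oops):
//   [A, B, B]  is_recursive(B) -> true   (run of length 2 at the top)
//   [B, B, A]  is_recursive(B) -> true   (the run need not be at the top)
//   [A, B]     is_recursive(B) -> false  (run of length 1)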
inline bool LockStack::is_recursive(oop o) const {
  if (!VM_Version::supports_recursive_lightweight_locking()) {
    return false;
  }
  verify("pre-is_recursive");

  // This will succeed iff there is a consecutive run of oops on the
  // lock-stack with a length of at least 2.

  assert(contains(o), "at least one entry must exist");
  int end = to_index(_top);
  // Start iterating from the top because the runtime code is more
  // interested in the balanced locking case when the top oop on the
  // lock-stack matches o. This will cause the for loop to break out
  // in the first loop iteration if it is non-recursive.
  for (int i = end - 1; i > 0; i--) {
    if (_base[i - 1] == o && _base[i] == o) {
      verify("post-is_recursive");
      return true;
    }
    if (_base[i] == o) {
      // o can only occur in one consecutive run on the lock-stack.
      // Only one of the two oops checked matched o, so this run
      // must be of length 1 and thus not be recursive. Stop the search.
      break;
    }
  }

  verify("post-is_recursive");
  return false;
}

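// Illustration (stack shown bottom to top):
//   [A, B]  try_recursive_enter(B) -> true,  stack becomes [A, B, B]
//   [A, B]  try_recursive_enter(A) -> false, stack unchanged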
inline bool LockStack::try_recursive_enter(oop o) {
  if (!VM_Version::supports_recursive_lightweight_locking()) {
    return false;
  }
  verify("pre-try_recursive_enter");

  // This will succeed iff the top oop on the stack matches o.
  // When successful, o is pushed onto the lock-stack, creating a
  // consecutive run of at least 2 oops matching o on top of the
  // lock-stack.

  assert(!is_full(), "precond");

  int end = to_index(_top);
  if (end == 0 || _base[end - 1] != o) {
    // Topmost oop does not match o.
    verify("post-try_recursive_enter");
    return false;
  }

  _base[end] = o;
  _top += oopSize;
  verify("post-try_recursive_enter");
  return true;
}

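// Illustration (stack shown bottom to top):
//   [A, B, B]  try_recursive_exit(B) -> true,  stack becomes [A, B]
//   [A, B]     try_recursive_exit(B) -> false, stack unchanged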
inline bool LockStack::try_recursive_exit(oop o) {
  if (!VM_Version::supports_recursive_lightweight_locking()) {
    return false;
  }
  verify("pre-try_recursive_exit");

  // This will succeed iff the top two oops on the stack match o.
  // When successful, the top oop is popped off the lock-stack.
  // When unsuccessful the lock may still be recursive, in which
  // case the locking is unbalanced. This case is handled externally.

  assert(contains(o), "entries must exist");

  int end = to_index(_top);
  if (end <= 1 || _base[end - 1] != o || _base[end - 2] != o) {
    // The two topmost oops do not match o.
    verify("post-try_recursive_exit");
    return false;
  }

  _top -= oopSize;
  DEBUG_ONLY(_base[to_index(_top)] = nullptr;)
  verify("post-try_recursive_exit");
  return true;
}

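// Removes every entry matching o by compacting the surviving entries
// toward the base of the stack, and returns the number of entries
// removed (i.e., how many times o was on the lock-stack).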
inline size_t LockStack::remove(oop o) {
  verify("pre-remove");
  assert(contains(o), "entry must be present: " PTR_FORMAT, p2i(o));

  int end = to_index(_top);
  int inserted = 0;
  for (int i = 0; i < end; i++) {
    if (_base[i] != o) {
      if (inserted != i) {
        _base[inserted] = _base[i];
      }
      inserted++;
    }
  }

#ifdef ASSERT
  for (int i = inserted; i < end; i++) {
    _base[i] = nullptr;
  }
#endif

  uint32_t removed = end - inserted;
  _top -= removed * oopSize;
  assert(!contains(o), "entry must have been removed: " PTR_FORMAT, p2i(o));
  verify("post-remove");
  return removed;
}

inline bool LockStack::contains(oop o) const {
  verify("pre-contains");

  // Can't poke around in thread oops without having started stack watermark processing.
  assert(StackWatermarkSet::processing_started(get_thread()), "Processing must have started!");

  int end = to_index(_top);
  for (int i = end - 1; i >= 0; i--) {
    if (_base[i] == o) {
      verify("post-contains");
      return true;
    }
  }
  verify("post-contains");
  return false;
}

inline void LockStack::oops_do(OopClosure* cl) {
  verify("pre-oops-do");
  int end = to_index(_top);
  for (int i = 0; i < end; i++) {
    cl->do_oop(&_base[i]);
  }
  verify("post-oops-do");
}

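// The OMCache keeps a small number of recently used (oop, ObjectMonitor)
// pairs in most-recently-used order: a new pair is inserted at the front
// and older entries are shifted one slot toward the back, reusing the
// first matching, free, or stale slot along the way and dropping the
// oldest entry when the cache is full.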
inline void OMCache::set_monitor(ObjectMonitor *monitor) {
  const int end = OMCacheSize - 1;
  if (end < 0) {
    return;
  }

  oop obj = monitor->object_peek();
  assert(obj != nullptr, "must be alive");
  assert(monitor == LightweightSynchronizer::read_monitor(JavaThread::current(), obj), "must exist in the table");

  OMCacheEntry to_insert = {obj, monitor};

  for (int i = 0; i < end; ++i) {
    if (_entries[i]._oop == obj ||
        _entries[i]._monitor == nullptr ||
        _entries[i]._monitor->is_being_async_deflated()) {
      // Reuse a matching, free, or stale slot.
      _entries[i] = to_insert;
      return;
    }
    // Swap with the most recent value.
    ::swap(to_insert, _entries[i]);
  }
  _entries[end] = to_insert;
}

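// Looks up the cached monitor for o. A hit on a monitor that is being
// async-deflated is treated as a miss: the stale entry is evicted by
// shifting the remaining entries down and clearing the last slot.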
inline ObjectMonitor* OMCache::get_monitor(oop o) {
  for (int i = 0; i < OMCacheSize; ++i) {
    if (_entries[i]._oop == o) {
      assert(_entries[i]._monitor != nullptr, "monitor must exist");
      if (_entries[i]._monitor->is_being_async_deflated()) {
        // The monitor is stale; shift down the remaining entries
        // and clear the freed-up last slot.
        for (; i < OMCacheSize - 1; ++i) {
          _entries[i] = _entries[i + 1];
        }
        _entries[i] = {};
        return nullptr;
      }
      return _entries[i]._monitor;
    }
  }
  return nullptr;
}

inline void OMCache::clear() {
  for (size_t i = 0; i < CAPACITY; ++i) {
    // Clear the entry.
    _entries[i] = {};
  }
}

#endif // SHARE_RUNTIME_LOCKSTACK_INLINE_HPP