/*
 * Copyright (c) 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_LOCKSTACK_INLINE_HPP
#define SHARE_RUNTIME_LOCKSTACK_INLINE_HPP

#include "runtime/lockStack.hpp"

#include "memory/iterator.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"

inline int LockStack::to_index(uint32_t offset) {
  assert(is_aligned(offset, oopSize), "Bad alignment: %u", offset);
  assert((offset <= end_offset()), "lockstack overflow: offset %u end_offset %u", offset, end_offset());
  assert((offset >= start_offset()), "lockstack underflow: offset %u start_offset %u", offset, start_offset());
  return (offset - lock_stack_base_offset) / oopSize;
}

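// Recover the owning JavaThread from this embedded LockStack: the
// lock-stack lives at lock_stack_offset inside JavaThread, so subtracting
// that offset from our own address yields the enclosing thread.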
inline JavaThread* LockStack::get_thread() const {
  char* addr = reinterpret_cast<char*>(const_cast<LockStack*>(this));
  return reinterpret_cast<JavaThread*>(addr - lock_stack_offset);
}

inline bool LockStack::is_full() const {
  return to_index(_top) == CAPACITY;
}

inline bool LockStack::is_owning_thread() const {
  Thread* current = Thread::current();
  if (current->is_Java_thread()) {
    JavaThread* thread = JavaThread::cast(current);
    bool is_owning = &thread->lock_stack() == this;
    assert(is_owning == (get_thread() == thread), "is_owning sanity");
    return is_owning;
  }
  return false;
}

inline void LockStack::push(oop o) {
  verify("pre-push");
  assert(oopDesc::is_oop(o), "must be");
  assert(!contains(o), "entries must be unique");
  assert(!is_full(), "must have room");
  assert(_base[to_index(_top)] == nullptr, "expect zapped entry");
  _base[to_index(_top)] = o;
  _top += oopSize;
  verify("post-push");
}
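
// A minimal usage sketch (illustrative only; the real callers live in the
// interpreter, the compilers and LightweightSynchronizer):
//
//   LockStack& ls = JavaThread::current()->lock_stack();
//   if (!ls.is_full() && !ls.contains(obj)) {
//     ls.push(obj);  // obj is now the top entry of the lock-stack
//   }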

inline oop LockStack::bottom() const {
  assert(to_index(_top) > 0, "must contain an oop");
  return _base[0];
}

inline bool LockStack::is_empty() const {
  return to_index(_top) == 0;
}

inline bool LockStack::is_recursive(oop o) const {
  if (!VM_Version::supports_recursive_lightweight_locking()) {
    return false;
  }
  verify("pre-is_recursive");

  // This will succeed iff there is a consecutive run of at least two
  // entries matching o on the lock-stack.

  assert(contains(o), "at least one entry must exist");
  int end = to_index(_top);
  // Start iterating from the top because the runtime code is more
  // interested in the balanced locking case when the top oop on the
  // lock-stack matches o. This will cause the for loop to break out
  // in the first loop iteration if it is non-recursive.
  for (int i = end - 1; i > 0; i--) {
    if (_base[i - 1] == o && _base[i] == o) {
      verify("post-is_recursive");
      return true;
    }
    if (_base[i] == o) {
      // o can only occur in one consecutive run on the lock-stack.
      // Only one of the two oops checked matched o, so this run
      // must be of length 1 and thus not be recursive. Stop the search.
      break;
    }
  }

  verify("post-is_recursive");
  return false;
}
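
// Example of the search above: with the lock-stack holding [A, B, B]
// (bottom to top), is_recursive(B) sees the matching pair at the top and
// returns true, while is_recursive(A) breaks out of the loop on its first
// iteration and returns false.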

inline bool LockStack::try_recursive_enter(oop o) {
  if (!VM_Version::supports_recursive_lightweight_locking()) {
    return false;
  }
  verify("pre-try_recursive_enter");

  // This will succeed iff the top oop on the lock-stack matches o.
  // When successful o will be pushed onto the lock-stack, creating
  // a consecutive run of at least two oops matching o on top of
  // the lock-stack.

  assert(!is_full(), "precond");

  int end = to_index(_top);
  if (end == 0 || _base[end - 1] != o) {
    // Topmost oop does not match o.
    verify("post-try_recursive_enter");
    return false;
  }

  _base[end] = o;
  _top += oopSize;
  verify("post-try_recursive_enter");
  return true;
}

inline bool LockStack::try_recursive_exit(oop o) {
  if (!VM_Version::supports_recursive_lightweight_locking()) {
    return false;
  }
  verify("pre-try_recursive_exit");

  // This will succeed iff the top two oops on the lock-stack match o.
  // When successful the top oop will be popped off the lock-stack.
  // When unsuccessful the lock may still be recursive, in which
  // case the locking is unbalanced. This case is handled externally.

  assert(contains(o), "entries must exist");

  int end = to_index(_top);
  if (end <= 1 || _base[end - 1] != o || _base[end - 2] != o) {
    // The two topmost oops do not match o.
    verify("post-try_recursive_exit");
    return false;
  }

  _top -= oopSize;
  DEBUG_ONLY(_base[to_index(_top)] = nullptr;)
  verify("post-try_recursive_exit");
  return true;
}
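
// A balanced recursive pair, as a sketch (illustrative only; obj stands
// for an oop that a prior push() left on top of the lock-stack):
//
//   LockStack& ls = JavaThread::current()->lock_stack();
//   if (!ls.is_full() && ls.try_recursive_enter(obj)) {
//     // The top of the lock-stack is now [.., obj, obj].
//     bool popped = ls.try_recursive_exit(obj);  // pops one obj again
//     assert(popped, "balanced recursive exit must succeed");
//   }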

inline size_t LockStack::remove(oop o) {
  verify("pre-remove");
  assert(contains(o), "entry must be present: " PTR_FORMAT, p2i(o));

  int end = to_index(_top);
  int inserted = 0;
  for (int i = 0; i < end; i++) {
    if (_base[i] != o) {
      if (inserted != i) {
        _base[inserted] = _base[i];
      }
      inserted++;
    }
  }

#ifdef ASSERT
  for (int i = inserted; i < end; i++) {
    _base[i] = nullptr;
  }
#endif

  uint32_t removed = end - inserted;
  _top -= removed * oopSize;
  assert(!contains(o), "entry must have been removed: " PTR_FORMAT, p2i(o));
  verify("post-remove");
  return removed;
}
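
// For example, removing B from a lock-stack holding [A, B, B, C] compacts
// it to [A, C] and returns 2; _top shrinks by two slots.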

inline bool LockStack::contains(oop o) const {
  verify("pre-contains");

  // Can't poke around in thread oops without having started stack watermark processing.
  assert(StackWatermarkSet::processing_started(get_thread()), "Processing must have started!");

  int end = to_index(_top);
  for (int i = end - 1; i >= 0; i--) {
    if (_base[i] == o) {
      verify("post-contains");
      return true;
    }
  }
  verify("post-contains");
  return false;
}

inline void LockStack::oops_do(OopClosure* cl) {
  verify("pre-oops-do");
  int end = to_index(_top);
  for (int i = 0; i < end; i++) {
    cl->do_oop(&_base[i]);
  }
  verify("post-oops-do");
}
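
// A hypothetical closure, purely to illustrate oops_do (not part of the
// runtime): it counts the non-null entries the iteration visits.
//
//   class CountOopsClosure : public OopClosure {
//    public:
//     int _count = 0;
//     void do_oop(oop* p) override       { if (*p != nullptr) _count++; }
//     void do_oop(narrowOop* p) override { ShouldNotReachHere(); }
//   };
//   CountOopsClosure cl;
//   thread->lock_stack().oops_do(&cl);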
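// The OMCache keeps its entries in most-recently-used order: set_monitor
// inserts at the front and lets older entries bubble one slot toward the
// end, evicting whatever occupies the last slot. Stale slots (an entry for
// the same oop, an empty slot, or a deflating monitor) are reused in place.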
inline void OMCache::set_monitor(ObjectMonitor* monitor) {
  const int end = OMCache::CAPACITY - 1;

  oop obj = monitor->object_peek();
  assert(obj != nullptr, "must be alive");
  assert(monitor == LightweightSynchronizer::get_monitor_from_table(JavaThread::current(), obj), "must exist in table");

  OMCacheEntry to_insert = {obj, monitor};

  for (int i = 0; i < end; ++i) {
    if (_entries[i]._oop == obj ||
        _entries[i]._monitor == nullptr ||
        _entries[i]._monitor->is_being_async_deflated()) {
      // Reuse a stale slot: same oop, empty, or deflating monitor.
      _entries[i] = to_insert;
      return;
    }
    // Keep MRU order: place the incoming entry in this slot and carry
    // the displaced entry one slot further down.
    ::swap(to_insert, _entries[i]);
  }
  _entries[end] = to_insert;
}

inline ObjectMonitor* OMCache::get_monitor(oop o) {
  for (int i = 0; i < CAPACITY; ++i) {
    if (_entries[i]._oop == o) {
      assert(_entries[i]._monitor != nullptr, "monitor must exist");
      if (_entries[i]._monitor->is_being_async_deflated()) {
        // The cached monitor is being deflated and must not be handed
        // out. Purge the entry by shifting the remaining entries down.
        for (; i < CAPACITY - 1; ++i) {
          _entries[i] = _entries[i + 1];
        }
        // Clear the freed last slot.
        _entries[i] = {};
        return nullptr;
      }
      return _entries[i]._monitor;
    }
  }
  return nullptr;
}
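
// A lookup sketch (illustrative; om_cache() is a hypothetical accessor
// for the thread's OMCache):
//
//   ObjectMonitor* m = thread->om_cache().get_monitor(obj);
//   if (m == nullptr) {
//     // Cache miss or deflating monitor: fall back to the shared table.
//     m = LightweightSynchronizer::get_monitor_from_table(thread, obj);
//   }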

inline void OMCache::clear() {
  for (size_t i = 0; i < CAPACITY; ++i) {
    // Drop all cached oop->monitor entries.
    _entries[i] = {};
  }
}

#endif // SHARE_RUNTIME_LOCKSTACK_INLINE_HPP