/*
 * Copyright (c) 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_LOCKSTACK_INLINE_HPP
#define SHARE_RUNTIME_LOCKSTACK_INLINE_HPP

#include "runtime/lockStack.hpp"

#include "memory/iterator.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"

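// Convert a byte offset (as stored in _top, relative to the owning JavaThread)
// into an index into the _base[] oop array.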
inline int LockStack::to_index(uint32_t offset) {
  assert(is_aligned(offset, oopSize), "Bad alignment: %u", offset);
  assert((offset <= end_offset()), "lockstack overflow: offset %d end_offset %d", offset, end_offset());
  assert((offset >= start_offset()), "lockstack underflow: offset %d start_offset %d", offset, start_offset());
  return (offset - lock_stack_base_offset) / oopSize;
}

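// The LockStack is embedded in its JavaThread at a fixed offset
// (lock_stack_offset), so the owning thread can be recovered from 'this'
// by subtracting that offset.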
JavaThread* LockStack::get_thread() const {
  char* addr = reinterpret_cast<char*>(const_cast<LockStack*>(this));
  return reinterpret_cast<JavaThread*>(addr - lock_stack_offset);
}

inline bool LockStack::is_full() const {
  return to_index(_top) == CAPACITY;
}

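// Returns true iff the current thread is the JavaThread that owns this
// lock-stack. Non-Java threads never own a lock-stack.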
inline bool LockStack::is_owning_thread() const {
  Thread* current = Thread::current();
  if (current->is_Java_thread()) {
    JavaThread* thread = JavaThread::cast(current);
    bool is_owning = &thread->lock_stack() == this;
    assert(is_owning == (get_thread() == thread), "is_owning sanity");
    return is_owning;
  }
  return false;
}

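// Push o onto the lock-stack. The caller is responsible for ensuring that
// there is room (!is_full()) and that o is not already present.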
inline void LockStack::push(oop o) {
  verify("pre-push");
  assert(oopDesc::is_oop(o), "must be");
  assert(!contains(o), "entries must be unique");
  assert(!is_full(), "must have room");
  assert(_base[to_index(_top)] == nullptr, "expect zapped entry");
  _base[to_index(_top)] = o;
  _top += oopSize;
  verify("post-push");
}

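// Returns the oldest (first-pushed) oop on the lock-stack.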
inline oop LockStack::bottom() const {
  assert(to_index(_top) > 0, "must contain an oop");
  return _base[0];
}

inline bool LockStack::is_empty() const {
  return to_index(_top) == 0;
}

inline bool LockStack::is_recursive(oop o) const {
  if (!VM_Version::supports_recursive_lightweight_locking()) {
    return false;
  }
  verify("pre-is_recursive");

  // This returns true iff o occupies a consecutive run of at least 2
  // entries on the lock-stack.

  assert(contains(o), "at least one entry must exist");
  int end = to_index(_top);
  // Start iterating from the top because the runtime code is more
  // interested in the balanced locking case when the top oop on the
  // lock-stack matches o. This will cause the for loop to break out
  // in the first loop iteration if it is non-recursive.
  for (int i = end - 1; i > 0; i--) {
    if (_base[i - 1] == o && _base[i] == o) {
      verify("post-is_recursive");
      return true;
    }
    if (_base[i] == o) {
      // o can only occur in one consecutive run on the lock-stack.
      // Only one of the two oops checked matched o, so this run
      // must be of length 1 and thus not be recursive. Stop the search.
      break;
    }
  }

  verify("post-is_recursive");
  return false;
}

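// Try to push o as a recursive entry: this succeeds only when o is already
// the topmost oop on the lock-stack. A hypothetical fast-path caller
// (the actual call sites live elsewhere in the runtime) might look like:
//
//   if (current->lock_stack().try_recursive_enter(obj())) {
//     return; // recursive lightweight lock; no further work needed here
//   }
//   // ... fall back to the non-recursive locking path ...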
inline bool LockStack::try_recursive_enter(oop o) {
  if (!VM_Version::supports_recursive_lightweight_locking()) {
    return false;
  }
  verify("pre-try_recursive_enter");

  // This will succeed iff the top oop on the lock-stack matches o.
  // When successful, o is pushed onto the lock-stack, creating a
  // consecutive run of at least 2 oops matching o on top of the
  // lock-stack.

  assert(!is_full(), "precond");

  int end = to_index(_top);
  if (end == 0 || _base[end - 1] != o) {
    // Topmost oop does not match o.
    verify("post-try_recursive_enter");
    return false;
  }

  _base[end] = o;
  _top += oopSize;
  verify("post-try_recursive_enter");
  return true;
}

inline bool LockStack::try_recursive_exit(oop o) {
  if (!VM_Version::supports_recursive_lightweight_locking()) {
    return false;
  }
  verify("pre-try_recursive_exit");

  // This will succeed iff the top two oops on the lock-stack match o.
  // When successful the top oop will be popped off the lock-stack.
  // When unsuccessful the lock may still be recursive, in which
  // case the locking is unbalanced. This case is handled externally.

  assert(contains(o), "entries must exist");

  int end = to_index(_top);
  if (end <= 1 || _base[end - 1] != o || _base[end - 2] != o) {
    // The two topmost oops do not match o.
    verify("post-try_recursive_exit");
    return false;
  }

  _top -= oopSize;
  DEBUG_ONLY(_base[to_index(_top)] = nullptr;)
  verify("post-try_recursive_exit");
  return true;
}

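// Remove every entry matching o (the entire consecutive run, if the lock is
// held recursively), compacting the remaining entries downward. Returns the
// number of entries removed.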
inline size_t LockStack::remove(oop o) {
  verify("pre-remove");
  assert(contains(o), "entry must be present: " PTR_FORMAT, p2i(o));

  int end = to_index(_top);
  int inserted = 0;
  for (int i = 0; i < end; i++) {
    if (_base[i] != o) {
      if (inserted != i) {
        _base[inserted] = _base[i];
      }
      inserted++;
    }
  }

#ifdef ASSERT
  for (int i = inserted; i < end; i++) {
    _base[i] = nullptr;
  }
#endif

  uint32_t removed = end - inserted;
  _top -= removed * oopSize;
  assert(!contains(o), "entry must have been removed: " PTR_FORMAT, p2i(o));
  verify("post-remove");
  return removed;
}

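// Returns true iff o is currently on the lock-stack. Scans from the top
// downward.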
inline bool LockStack::contains(oop o) const {
  verify("pre-contains");

  // Can't poke around in thread oops without having started stack watermark processing.
  assert(StackWatermarkSet::processing_started(get_thread()), "Processing must have started!");

  int end = to_index(_top);
  for (int i = end - 1; i >= 0; i--) {
    if (_base[i] == o) {
      verify("post-contains");
      return true;
    }
  }
  verify("post-contains");
  return false;
}

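// Apply the closure to every oop slot currently on the lock-stack, so that
// callers iterating a thread's oops (e.g. GC) can visit and update them.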
inline void LockStack::oops_do(OopClosure* cl) {
  verify("pre-oops-do");
  int end = to_index(_top);
  for (int i = 0; i < end; i++) {
    cl->do_oop(&_base[i]);
  }
  verify("post-oops-do");
}

#endif // SHARE_RUNTIME_LOCKSTACK_INLINE_HPP