src/hotspot/share/runtime/lockStack.inline.hpp

 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_LOCKSTACK_INLINE_HPP
#define SHARE_RUNTIME_LOCKSTACK_INLINE_HPP

#include "runtime/lockStack.hpp"

#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/lightweightSynchronizer.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"

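// Converts a byte offset (as stored in _top, relative to the owning
// JavaThread) into an index into the _base array.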
inline int LockStack::to_index(uint32_t offset) {
  assert(is_aligned(offset, oopSize), "Bad alignment: %u", offset);
  assert((offset <= end_offset()), "lockstack overflow: offset %u end_offset %u", offset, end_offset());
  assert((offset >= start_offset()), "lockstack underflow: offset %u start_offset %u", offset, start_offset());
  return (offset - lock_stack_base_offset) / oopSize;
}

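// The LockStack is embedded in its owning JavaThread at a fixed offset,
// so the thread can be recovered by subtracting lock_stack_offset from
// this object's address.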
inline JavaThread* LockStack::get_thread() const {
  char* addr = reinterpret_cast<char*>(const_cast<LockStack*>(this));
  return reinterpret_cast<JavaThread*>(addr - lock_stack_offset);
}

inline bool LockStack::is_full() const {
  return to_index(_top) == CAPACITY;
}

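// Returns true iff there is room to push n more oops onto the lock-stack.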
inline bool LockStack::can_push(int n) const {
  return (CAPACITY - to_index(_top)) >= n;
}

inline bool LockStack::is_owning_thread() const {
  Thread* current = Thread::current();
  if (current->is_Java_thread()) {
    JavaThread* thread = JavaThread::cast(current);
    bool is_owning = &thread->lock_stack() == this;
    assert(is_owning == (get_thread() == thread), "is_owning sanity");
    return is_owning;
  }
  return false;
}

inline void LockStack::push(oop o) {
  verify("pre-push");
  assert(oopDesc::is_oop(o), "must be");
  assert(!contains(o), "entries must be unique");
  assert(!is_full(), "must have room");
  assert(_base[to_index(_top)] == nullptr, "expect zapped entry");
  _base[to_index(_top)] = o;
  _top += oopSize;
  verify("post-push");
}

inline oop LockStack::bottom() const {
  assert(to_index(_top) > 0, "must contain an oop");
  return _base[0];
}

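// Returns the most recently pushed oop without removing it.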
inline oop LockStack::top() {
  assert(to_index(_top) > 0, "may only call with at least one element in the stack");
  return _base[to_index(_top) - 1];
}

inline bool LockStack::is_empty() const {
  return to_index(_top) == 0;
}

inline bool LockStack::is_recursive(oop o) const {
  if (!VM_Version::supports_recursive_lightweight_locking()) {
    return false;
  }
  verify("pre-is_recursive");

  // This will succeed iff there is a consecutive run of oops on the
  // lock-stack with a length of at least 2.

  assert(contains(o), "at least one entry must exist");
  int end = to_index(_top);
  // Start iterating from the top because the runtime code is more
  // interested in the balanced locking case when the top oop on the
  // lock-stack matches o. This will cause the for loop to break out
  // in the first loop iteration if it is non-recursive.
  for (int i = end - 1; i > 0; i--) {
  // ... [excerpt truncated here: original lines 117-218, from the rest of
  // is_recursive() through the start of contains(), are not shown] ...
  int end = to_index(_top);
  for (int i = end - 1; i >= 0; i--) {
    if (_base[i] == o) {
      verify("post-contains");
      return true;
    }
  }
  verify("post-contains");
  return false;
}

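// Visits every oop currently on the lock-stack, e.g. for GC root
// scanning and pointer adjustment.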
inline void LockStack::oops_do(OopClosure* cl) {
  verify("pre-oops-do");
  int end = to_index(_top);
  for (int i = 0; i < end; i++) {
    cl->do_oop(&_base[i]);
  }
  verify("post-oops-do");
}

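// Inserts a monitor at the front of the cache. Older entries are shifted
// one slot towards the back via swap, so the cache stays ordered from most
// to least recently inserted; a slot that already holds the same oop, is
// empty, or holds a deflated monitor is reused in place, and the oldest
// entry is evicted when the cache is full.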
inline void OMCache::set_monitor(ObjectMonitor* monitor) {
  const int end = OMCacheSize - 1;
  if (end < 0) {
    return;
  }

  oop obj = monitor->object_peek();
  assert(obj != nullptr, "must be alive");
  assert(monitor == LightweightSynchronizer::read_monitor(JavaThread::current(), obj), "must exist in the table");

  OMCacheEntry to_insert = {obj, monitor};

  for (int i = 0; i < end; ++i) {
    if (_entries[i]._oop == obj ||
        _entries[i]._monitor == nullptr ||
        _entries[i]._monitor->is_being_async_deflated()) {
      // Use stale slot.
      _entries[i] = to_insert;
      return;
    }
    // Swap with the most recent value.
    ::swap(to_insert, _entries[i]);
  }
  _entries[end] = to_insert;
}

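// Looks up the cached monitor for o. Returns null on a miss; if the cached
// monitor has been deflated, the stale entry is removed, the remaining
// entries are compacted, and null is returned.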
inline ObjectMonitor* OMCache::get_monitor(oop o) {
  for (int i = 0; i < OMCacheSize; ++i) {
    if (_entries[i]._oop == o) {
      assert(_entries[i]._monitor != nullptr, "monitor must exist");
      if (_entries[i]._monitor->is_being_async_deflated()) {
        // The cached monitor is stale: shift the remaining entries down
        // over it and clear the freed slot at the end.
        for (; i < OMCacheSize - 1; ++i) {
          _entries[i] = _entries[i + 1];
        }
        _entries[i] = {};
        return nullptr;
      }
      return _entries[i]._monitor;
    }
  }
  return nullptr;
}

inline void OMCache::clear() {
  // Reset every slot to an empty entry.
  for (int i = 0; i < OMCacheSize; ++i) {
    _entries[i] = {};
  }
}

#endif // SHARE_RUNTIME_LOCKSTACK_INLINE_HPP