1 /*
  2  * Copyright (c) 2022, Red Hat, Inc. All rights reserved.
  3  * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
  4  * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
  5  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  6  *
  7  * This code is free software; you can redistribute it and/or modify it
  8  * under the terms of the GNU General Public License version 2 only, as
  9  * published by the Free Software Foundation.
 10  *
 11  * This code is distributed in the hope that it will be useful, but WITHOUT
 12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 14  * version 2 for more details (a copy is included in the LICENSE file that
 15  * accompanied this code).
 16  *
 17  * You should have received a copy of the GNU General Public License version
 18  * 2 along with this work; if not, write to the Free Software Foundation,
 19  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 20  *
 21  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 22  * or visit www.oracle.com if you need additional information or have any
 23  * questions.
 24  *
 25  */
 26 
 27 #include "precompiled.hpp"
 28 #include "memory/allocation.hpp"
 29 #include "oops/markWord.hpp"
 30 #include "oops/oop.inline.hpp"
 31 #include "runtime/globals.hpp"
 32 #include "runtime/lockStack.inline.hpp"
 33 #include "runtime/objectMonitor.inline.hpp"
 34 #include "runtime/safepoint.hpp"
 35 #include "runtime/stackWatermark.hpp"
 36 #include "runtime/stackWatermarkSet.inline.hpp"
 37 #include "runtime/thread.hpp"
 38 #include "utilities/copy.hpp"
 39 #include "utilities/debug.hpp"
 40 #include "utilities/globalDefinitions.hpp"
 41 #include "utilities/growableArray.hpp"
 42 #include "utilities/ostream.hpp"
 43 #include "utilities/sizes.hpp"
 44 
 45 #include <type_traits>
 46 
// Byte offsets of the lock-stack fields within JavaThread, cached from the
// JavaThread offset accessors. NOTE(review): presumably consumed by the code
// generators that access the lock stack directly — confirm against users.
const int LockStack::lock_stack_offset =      in_bytes(JavaThread::lock_stack_offset());
const int LockStack::lock_stack_top_offset =  in_bytes(JavaThread::lock_stack_top_offset());
const int LockStack::lock_stack_base_offset = in_bytes(JavaThread::lock_stack_base_offset());
 50 
 51 LockStack::LockStack(JavaThread* jt) :
 52   _top(lock_stack_base_offset), _base() {
 53   // Make sure the layout of the object is compatible with the emitted code's assumptions.
 54   STATIC_ASSERT(sizeof(_bad_oop_sentinel) == oopSize);
 55   STATIC_ASSERT(sizeof(_base[0]) == oopSize);
 56   STATIC_ASSERT(std::is_standard_layout<LockStack>::value);
 57   STATIC_ASSERT(offsetof(LockStack, _bad_oop_sentinel) == offsetof(LockStack, _base) - oopSize);
 58 #ifdef ASSERT
 59   for (int i = 0; i < CAPACITY; i++) {
 60     _base[i] = nullptr;
 61   }
 62 #endif
 63 }
 64 
 65 uint32_t LockStack::start_offset() {
 66   int offset = lock_stack_base_offset;
 67   assert(offset > 0, "must be positive offset");
 68   return static_cast<uint32_t>(offset);
 69 }
 70 
 71 uint32_t LockStack::end_offset() {
 72   int offset = lock_stack_base_offset + CAPACITY * oopSize;
 73   assert(offset > 0, "must be positive offset");
 74   return static_cast<uint32_t>(offset);
 75 }
 76 
 77 #ifndef PRODUCT
// Debug-only sanity check of the lock-stack invariants. `msg` is folded into
// the assert text to identify the call site.
void LockStack::verify(const char* msg) const {
  assert(LockingMode == LM_LIGHTWEIGHT, "never use lock-stack when light weight locking is disabled");
  assert((_top <= end_offset()), "lockstack overflow: _top %d end_offset %d", _top, end_offset());
  assert((_top >= start_offset()), "lockstack underflow: _top %d start_offset %d", _top, start_offset());
  // Only inspect the entries when they cannot be concurrently mutated: either
  // all threads are stopped at a safepoint, or the current (Java) thread owns
  // this lock stack.
  if (SafepointSynchronize::is_at_safepoint() || (Thread::current()->is_Java_thread() && is_owning_thread())) {
    int top = to_index(_top);
    for (int i = 0; i < top; i++) {
      assert(_base[i] != nullptr, "no zapped before top");
      if (VM_Version::supports_recursive_lightweight_locking()) {
        // Recursive lightweight locking may push the same oop several times
        // in a row. Advance `i` past such a run so the uniqueness check
        // below only compares distinct lock entries.
        oop o = _base[i];
        for (; i < top - 1; i++) {
          // Consecutive entries may be the same
          if (_base[i + 1] != o) {
            break;
          }
        }
      }

      // Apart from the allowed consecutive duplicates, each object may appear
      // at most once on the lock stack.
      for (int j = i + 1; j < top; j++) {
        assert(_base[i] != _base[j], "entries must be unique: %s", msg);
      }
    }
    // Every slot at or above the top must still hold the nullptr zap value
    // (written by the debug-only loop in the constructor).
    for (int i = top; i < CAPACITY; i++) {
      assert(_base[i] == nullptr, "only zapped entries after top: i: %d, top: %d, entry: " PTR_FORMAT, i, top, p2i(_base[i]));
    }
  }
}
105 #endif
106 
107 #ifdef ASSERT
// Debug-only check that `lock_order` (locks of one frame; iterated here from
// the end toward the start) is consistent with this lock stack: entries that
// are fast-locked must pair up with the top entries of the lock stack in
// order, and every other entry must be inflated and owned by (or, for
// non-leaf frames, waited on by) this thread.
void LockStack::verify_consistent_lock_order(GrowableArray<oop>& lock_order, bool leaf_frame) const {
  int top_index = to_index(_top);
  int lock_index = lock_order.length();

  if (!leaf_frame) {
    // If the lock_order is not from the leaf frame we must search
    // for the top_index which fits with the most recent fast_locked
    // objects in the lock stack.
    while (lock_index-- > 0) {
      const oop obj = lock_order.at(lock_index);
      if (contains(obj)) {
        // The effective top for this frame is just above obj's first entry.
        for (int index = 0; index < top_index; index++) {
          if (_base[index] == obj) {
            // Found top index
            top_index = index + 1;
            break;
          }
        }

        if (VM_Version::supports_recursive_lightweight_locking()) {
          // With recursive locks there may be more of the same object;
          // extend the effective top by one per extra occurrence.
          while (lock_index-- > 0 && lock_order.at(lock_index) == obj) {
            top_index++;
          }
          assert(top_index <= to_index(_top), "too many obj in lock_order");
        }

        break;
      }
    }

    // Restart the scan over the whole lock_order, now using the adjusted
    // top_index computed above.
    lock_index = lock_order.length();
  }

  JavaThread* thread = get_thread();
  Thread* current = Thread::current();
  // Pair lock_order entries (from the end) with lock-stack entries (from
  // top_index downwards).
  while (lock_index-- > 0) {
    const oop obj = lock_order.at(lock_index);
    const markWord mark = obj->mark_acquire();
    assert(obj->is_locked(), "must be locked");
    ObjectMonitor* monitor = nullptr;
    if (mark.has_monitor()) {
      monitor = LightweightSynchronizer::read_monitor(current, obj);
    }
    if (top_index > 0 && obj == _base[top_index - 1]) {
      // Matches the current lock-stack slot: must still be fast-locked, or
      // have been inflated by another thread (anonymous monitor owner).
      assert(mark.is_fast_locked() || monitor->is_owner_anonymous(),
             "must be fast_locked or inflated by other thread");
      top_index--;
    } else {
      // Not (at this position) on the lock stack: the lock must be inflated
      // and its monitor owned by this thread, or — for non-leaf frames —
      // currently waited on by it.
      assert(!mark.is_fast_locked(), "must be inflated");
      assert(monitor->owner_raw() == thread ||
             (!leaf_frame && thread->current_waiting_monitor() == monitor),
             "must be owned by (or waited on by) thread");
      assert(!contains(obj), "must not be on lock_stack");
    }
  }
}
165 #endif
166 
167 void LockStack::print_on(outputStream* st) {
168   for (int i = to_index(_top); (--i) >= 0;) {
169     st->print("LockStack[%d]: ", i);
170     oop o = _base[i];
171     if (oopDesc::is_oop(o)) {
172       o->print_on(st);
173     } else {
174       st->print_cr("not an oop: " PTR_FORMAT, p2i(o));
175     }
176   }
177 }
178 
OMCache::OMCache(JavaThread* jt) : _entries() {
  // Compile-time layout checks: the cache entries and the trailing
  // _null_sentinel are addressed by raw offsets, so the sentinel must sit
  // exactly one oop-stride past the last entry's _oop field.
  STATIC_ASSERT(std::is_standard_layout<OMCache>::value);
  STATIC_ASSERT(std::is_standard_layout<OMCache::OMCacheEntry>::value);
  STATIC_ASSERT(offsetof(OMCache, _null_sentinel) == offsetof(OMCache, _entries) +
                offsetof(OMCache::OMCacheEntry, _oop) +
                OMCache::CAPACITY * in_bytes(oop_to_oop_difference()));
}