/*
 * Copyright (c) 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/ostream.hpp"

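// Byte offsets of the lock stack and its _top and _base fields within the owning JavaThread.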
const int LockStack::lock_stack_offset = in_bytes(JavaThread::lock_stack_offset());
const int LockStack::lock_stack_top_offset = in_bytes(JavaThread::lock_stack_top_offset());
const int LockStack::lock_stack_base_offset = in_bytes(JavaThread::lock_stack_base_offset());

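// The lock stack is a small, fixed-capacity array of oops embedded in each JavaThread;
// under LM_LIGHTWEIGHT it records the objects the thread currently holds fast-locked.
// Debug builds zap all slots to nullptr so that verification can spot stale entries.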
LockStack::LockStack(JavaThread* jt) :
  _top(lock_stack_base_offset), _base() {
#ifdef ASSERT
  for (int i = 0; i < CAPACITY; i++) {
    _base[i] = nullptr;
  }
#endif
}

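// Byte range [start_offset, end_offset) of the lock-stack slots, relative to the owning JavaThread.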
uint32_t LockStack::start_offset() {
  int offset = lock_stack_base_offset;
  assert(offset > 0, "must be positive offset");
  return static_cast<uint32_t>(offset);
}

uint32_t LockStack::end_offset() {
  int offset = lock_stack_base_offset + CAPACITY * oopSize;
  assert(offset > 0, "must be positive offset");
  return static_cast<uint32_t>(offset);
}

#ifndef PRODUCT
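// Debug-only sanity checks: _top stays within bounds, entries below top are non-null and unique,
// and slots at or above top are still zapped (only inspected at a safepoint or by the owning thread).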
void LockStack::verify(const char* msg) const {
  assert(LockingMode == LM_LIGHTWEIGHT, "never use lock-stack when lightweight locking is disabled");
  assert((_top <= end_offset()), "lockstack overflow: _top %d end_offset %d", _top, end_offset());
  assert((_top >= start_offset()), "lockstack underflow: _top %d start_offset %d", _top, start_offset());
  if (SafepointSynchronize::is_at_safepoint() || (Thread::current()->is_Java_thread() && is_owning_thread())) {
    int top = to_index(_top);
    for (int i = 0; i < top; i++) {
      assert(_base[i] != nullptr, "no zapped entries before top");
      for (int j = i + 1; j < top; j++) {
        assert(_base[i] != _base[j], "entries must be unique: %s", msg);
      }
    }
    for (int i = top; i < CAPACITY; i++) {
      assert(_base[i] == nullptr, "only zapped entries after top: i: %d, top: %d, entry: " PTR_FORMAT, i, top, p2i(_base[i]));
    }
  }
}
#endif

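// Print the lock-stack entries, most recently locked object first.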
void LockStack::print_on(outputStream* st) {
  for (int i = to_index(_top); (--i) >= 0;) {
    st->print("LockStack[%d]: ", i);
    oop o = _base[i];
    if (oopDesc::is_oop(o)) {
      o->print_on(st);
    } else {
      st->print_cr("not an oop: " PTR_FORMAT, p2i(o));
    }
  }
}
|
/*
 * Copyright (c) 2022, Red Hat, Inc. All rights reserved.
 * Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/ostream.hpp"

#include <type_traits>

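// Byte offsets of the lock stack and its _top and _base fields within the owning JavaThread.
// _top is kept as a byte offset from the thread rather than as an array index, so that
// generated code can address the current top slot directly.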
const int LockStack::lock_stack_offset = in_bytes(JavaThread::lock_stack_offset());
const int LockStack::lock_stack_top_offset = in_bytes(JavaThread::lock_stack_top_offset());
const int LockStack::lock_stack_base_offset = in_bytes(JavaThread::lock_stack_base_offset());

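// The lock stack is a small, fixed-capacity array of oops embedded in each JavaThread.
// Under LM_LIGHTWEIGHT it records the objects this thread has fast-locked, in lock order;
// when the platform supports recursive lightweight locking, a recursively locked object
// occupies consecutive slots. The _bad_oop_sentinel slot directly below _base allows
// probing one entry below the top without a separate empty-stack check. In debug builds
// all slots are zapped to nullptr so that verification can tell live entries from unused ones.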
LockStack::LockStack(JavaThread* jt) :
  _top(lock_stack_base_offset), _base() {
  // Make sure the layout of the object is compatible with the emitted code's assumptions.
  STATIC_ASSERT(sizeof(_bad_oop_sentinel) == oopSize);
  STATIC_ASSERT(sizeof(_base[0]) == oopSize);
  STATIC_ASSERT(std::is_standard_layout<LockStack>::value);
  STATIC_ASSERT(offsetof(LockStack, _bad_oop_sentinel) == offsetof(LockStack, _base) - oopSize);
#ifdef ASSERT
  for (int i = 0; i < CAPACITY; i++) {
    _base[i] = nullptr;
  }
#endif
}

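// start_offset() and end_offset() report the byte range of the lock-stack slots relative
// to the owning JavaThread: [start_offset, end_offset) spans all CAPACITY entries.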
uint32_t LockStack::start_offset() {
  int offset = lock_stack_base_offset;
  assert(offset > 0, "must be positive offset");
  return static_cast<uint32_t>(offset);
}

uint32_t LockStack::end_offset() {
  int offset = lock_stack_base_offset + CAPACITY * oopSize;
  assert(offset > 0, "must be positive offset");
  return static_cast<uint32_t>(offset);
}

#ifndef PRODUCT
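// Debug-only sanity checks: _top must stay within the stack bounds, entries below top must be
// non-null and unique (apart from consecutive duplicates produced by recursive locking), and
// all slots at or above top must still be zapped. The contents are only inspected when it is
// safe to do so, i.e. at a safepoint or from the owning thread.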
void LockStack::verify(const char* msg) const {
  assert(LockingMode == LM_LIGHTWEIGHT, "never use lock-stack when lightweight locking is disabled");
  assert((_top <= end_offset()), "lockstack overflow: _top %d end_offset %d", _top, end_offset());
  assert((_top >= start_offset()), "lockstack underflow: _top %d start_offset %d", _top, start_offset());
  if (SafepointSynchronize::is_at_safepoint() || (Thread::current()->is_Java_thread() && is_owning_thread())) {
    int top = to_index(_top);
    for (int i = 0; i < top; i++) {
      assert(_base[i] != nullptr, "no zapped entries before top");
      if (VM_Version::supports_recursive_lightweight_locking()) {
        oop o = _base[i];
        for (; i < top - 1; i++) {
          // Consecutive entries may be the same object (recursive locking)
          if (_base[i + 1] != o) {
            break;
          }
        }
      }

      for (int j = i + 1; j < top; j++) {
        assert(_base[i] != _base[j], "entries must be unique: %s", msg);
      }
    }
    for (int i = top; i < CAPACITY; i++) {
      assert(_base[i] == nullptr, "only zapped entries after top: i: %d, top: %d, entry: " PTR_FORMAT, i, top, p2i(_base[i]));
    }
  }
}
#endif

#ifdef ASSERT
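// Cross-checks a lock order recorded from the thread's stack frames against the contents of
// the lock stack. lock_order holds the locked objects with the most recently acquired lock
// last. If the order was not gathered down to the leaf frame, the matching top of the lock
// stack is located first, since younger frames may have pushed additional entries.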
void LockStack::verify_consistent_lock_order(GrowableArray<oop>& lock_order, bool leaf_frame) const {
  int top_index = to_index(_top);
  int lock_index = lock_order.length();

  if (!leaf_frame) {
    // If the lock_order is not from the leaf frame we must search
    // for the top_index which fits with the most recent fast_locked
    // objects in the lock stack.
    while (lock_index-- > 0) {
      const oop obj = lock_order.at(lock_index);
      if (contains(obj)) {
        for (int index = 0; index < top_index; index++) {
          if (_base[index] == obj) {
            // Found top index
            top_index = index + 1;
            break;
          }
        }

        if (VM_Version::supports_recursive_lightweight_locking()) {
          // With recursive locks there may be more entries for the same object
          while (lock_index-- > 0 && lock_order.at(lock_index) == obj) {
            top_index++;
          }
          assert(top_index <= to_index(_top), "too many entries in lock_order");
        }

        break;
      }
    }

    lock_index = lock_order.length();
  }

  while (lock_index-- > 0) {
    const oop obj = lock_order.at(lock_index);
    const markWord mark = obj->mark_acquire();
    assert(obj->is_locked(), "must be locked");
    if (top_index > 0 && obj == _base[top_index - 1]) {
      assert(mark.is_fast_locked() || mark.monitor()->is_owner_anonymous(),
             "must be fast_locked or inflated by other thread");
      top_index--;
    } else {
      assert(!mark.is_fast_locked(), "must be inflated");
      assert(mark.monitor()->owner_raw() == get_thread() ||
             (!leaf_frame && get_thread()->current_waiting_monitor() == mark.monitor()),
             "must be owned by (or waited on by) thread");
      assert(!contains(obj), "must not be on lock_stack");
    }
  }
}
#endif

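// Print the contents of the lock stack, starting with the most recently pushed entry.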
void LockStack::print_on(outputStream* st) {
  for (int i = to_index(_top); (--i) >= 0;) {
    st->print("LockStack[%d]: ", i);
    oop o = _base[i];
    if (oopDesc::is_oop(o)) {
      o->print_on(st);
    } else {
      st->print_cr("not an oop: " PTR_FORMAT, p2i(o));
    }
  }
}