/*
 * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/method.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "utilities/debug.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

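// The "disarmed" guard value is simply the current global GC phase; an nmethod
// whose guard value matches it is up to date and can be entered without taking
// the slow path.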
int BarrierSetNMethod::disarmed_guard_value() const {
  return *disarmed_guard_value_address();
}

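// Not every nmethod gets an entry barrier. Method handle intrinsics and the
// continuation enter/yield intrinsics are excluded; native wrappers and code
// compiled by C1, C2 or JVMCI do get one.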
bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) {
  if (nm->method()->is_method_handle_intrinsic()) {
    return false;
  }

  if (nm->method()->is_continuation_enter_intrinsic()) {
    return false;
  }

  if (nm->method()->is_continuation_yield_intrinsic()) {
    return false;
  }

  if (nm->method()->is_continuation_native_intrinsic()) {
    guarantee(false, "Unknown Continuation native intrinsic");
    return false;
  }

  if (nm->is_native_method() || nm->is_compiled_by_c2() || nm->is_compiled_by_c1() || nm->is_compiled_by_jvmci()) {
    return true;
  }

  return false;
}

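// Disarming stores the current disarmed (phase) value into the nmethod's guard,
// so the entry barrier fast path falls through. guard_with() stores a
// GC-chosen value instead, but must not touch the sticky not_entrant bit.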
void BarrierSetNMethod::disarm(nmethod* nm) {
  set_guard_value(nm, disarmed_guard_value());
}

void BarrierSetNMethod::guard_with(nmethod* nm, int value) {
  assert((value & not_entrant) == 0, "not_entrant bit is reserved");
  set_guard_value(nm, value);
}

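// An nmethod is armed when its guard value, ignoring the sticky not_entrant
// bit, does not match the current disarmed value, i.e. it has not yet been
// processed in the current GC phase.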
bool BarrierSetNMethod::is_armed(nmethod* nm) {
  return (guard_value(nm) & ~not_entrant) != disarmed_guard_value();
}

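// Shared slow path of the nmethod entry barrier: keep the nmethod's oops alive,
// mark the nmethod as possibly being on-stack for code cache unloading, and
// disarm it. GC-specific BarrierSetNMethod subclasses may override this with
// additional work.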
bool BarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
  class OopKeepAliveClosure : public OopClosure {
  public:
    virtual void do_oop(oop* p) {
      // Loads on nmethod oops are phantom strength.
      //
      // Note that we could have used NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(p),
      // but that would have *required* us to convert the returned LoadOopProxy to an oop,
      // or else the keep-alive load barrier would never be called. It's the LoadOopProxy-to-oop
      // conversion that performs the load barriers. This is too subtle, so we instead
      // perform an explicit keep-alive call.
      oop obj = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(p);
      if (obj != nullptr) {
        Universe::heap()->keep_alive(obj);
      }
    }

    virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };

  if (!is_armed(nm)) {
    // Some other thread got here first and healed the oops
    // and disarmed the nmethod. No need to continue.
    return true;
  }

  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));

  // If the nmethod is the only thing pointing to the oops, and we are using a
  // SATB GC, then it is important that this code marks them live.
  // Also, with a concurrent GC, it is possible that frames in continuation stack
  // chunks are not visited if they were allocated after the concurrent GC started.
  OopKeepAliveClosure cl;
  nm->oops_do(&cl);

  // CodeCache unloading support
  nm->mark_as_maybe_on_stack();

  disarm(nm);

  return true;
}

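// The disarmed value lives in _current_phase; each thread caches a copy at
// Thread::nmethod_disarmed_guard_value_offset(), which the compiled entry
// barrier (on most platforms) compares against the nmethod's guard word.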
int* BarrierSetNMethod::disarmed_guard_value_address() const {
  return (int*) &_current_phase;
}

ByteSize BarrierSetNMethod::thread_disarmed_guard_value_offset() const {
  return Thread::nmethod_disarmed_guard_value_offset();
}

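// Thread closure used by arm_all_nmethods() to publish the new disarmed value
// to every thread; nmethods still carrying an older value then appear armed
// to those threads.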
class BarrierSetNMethodArmClosure : public ThreadClosure {
private:
  int _disarmed_guard_value;

public:
  BarrierSetNMethodArmClosure(int disarmed_guard_value) :
    _disarmed_guard_value(disarmed_guard_value) {}

  virtual void do_thread(Thread* thread) {
    thread->set_nmethod_disarmed_guard_value(_disarmed_guard_value);
  }
};

void BarrierSetNMethod::arm_all_nmethods() {
  // Change to a new global GC phase. Doing this requires changing the thread-local
  // disarm value for all threads, to reflect the new GC phase.
  // We wrap around at INT_MAX. That means that we assume nmethods won't have ABA
  // problems in their nmethod disarm values after INT_MAX - 1 GCs. Every time a GC
  // completes, ABA problems are removed, but if a concurrent GC is started and then
  // aborted N times, that is when ABA problems could arise. If anything close to
  // INT_MAX - 1 GCs start without being able to finish, something is seriously
  // wrong.
  ++_current_phase;
  if (_current_phase == INT_MAX) {
    _current_phase = initial;
  }
  BarrierSetNMethodArmClosure cl(_current_phase);
  Threads::threads_do(&cl);

#if (defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
  // We clear the patching epoch when disarming nmethods, so that
  // the counter won't overflow.
  BarrierSetAssembler::clear_patching_epoch();
#endif
}

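// Runtime entry called from the platform-specific nmethod entry barrier stub.
// The compiled fast path compares the nmethod's guard word against the
// thread-local disarmed value and only calls in here when they differ.
// Conceptually (an illustrative sketch only; the real code is emitted by the
// platform BarrierSetAssembler ports):
//
//   guard  = <nmethod guard word>
//   disarm = <thread-local value at thread_disarmed_guard_value_offset()>
//   if (guard != disarm) {
//     nmethod_stub_entry_barrier(&return_address);   // slow path, this function
//   }
//
// A return value of 0 lets the stub fall through into the nmethod; 1 means the
// caller has been redirected by deoptimize() and must not enter.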
int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
  address return_address = *return_address_ptr;
  AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
  CodeBlob* cb = CodeCache::find_blob(return_address);
  assert(cb != nullptr, "invariant");

  nmethod* nm = cb->as_nmethod();
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  // Called upon first entry after being armed
  bool may_enter = !bs_nm->is_not_entrant(nm) && bs_nm->nmethod_entry_barrier(nm);
  assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");

  if (may_enter) {
    // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
    // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
    // code, where the existence of new instructions is communicated via data (the guard value).
    // This cross modify fence is only needed when the nmethod entry barrier modifies the
    // instructions. Not all platforms currently do that, so if this check becomes expensive,
    // it can be made conditional on the nmethod_patching_type.
    OrderAccess::cross_modify_fence();

    // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
    // a very rare event.
    if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
      static Atomic<uint32_t> counter{0};
      if (counter.add_then_fetch(1u) % 10 == 0) {
        may_enter = false;
      }
    }
  }

  if (!may_enter) {
    log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
    bs_nm->deoptimize(nm, return_address_ptr);
  }
  return may_enter ? 0 : 1;
}

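// OSR (on-stack replacement) entries bypass the normal verified entry point and
// its barrier stub, so the runtime runs the barrier explicitly before jumping
// into the OSR nmethod. The cross modify fence mirrors the one in
// nmethod_stub_entry_barrier().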
bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
  assert(nm->is_osr_method(), "Should not reach here");
  log_trace(nmethod, barrier)("Running osr nmethod entry barrier: " PTR_FORMAT, p2i(nm));
  bool result = nmethod_entry_barrier(nm);
  OrderAccess::cross_modify_fence();
  return result;
}

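// Accessors for oops in an nmethod's oop section. The no_keepalive variant
// reads the oop without notifying the GC; the phantom variant performs a
// phantom-strength load, letting the GC's load barrier keep the referent alive.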
oop BarrierSetNMethod::oop_load_no_keepalive(const nmethod* nm, int index) {
  return NativeAccess<AS_NO_KEEPALIVE>::oop_load(nm->oop_addr_at(index));
}

oop BarrierSetNMethod::oop_load_phantom(const nmethod* nm, int index) {
  return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(nm->oop_addr_at(index));
}

// Make the nmethod permanently not-entrant, so that nmethod_stub_entry_barrier() will call
// deoptimize() to redirect the caller to SharedRuntime::get_handle_wrong_method_stub().
// A sticky armed bit is set and the other guard bits are preserved. As a result, a call to
// nmethod_stub_entry_barrier() may appear to be spurious, because is_armed() may still return
// false and nmethod_entry_barrier() is then not called.
void BarrierSetNMethod::make_not_entrant(nmethod* nm) {
  set_guard_value(nm, not_entrant, not_entrant);
}

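// Tests the sticky not_entrant bit set by make_not_entrant(); once it is set,
// nmethod_stub_entry_barrier() always routes callers to deoptimization.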
bool BarrierSetNMethod::is_not_entrant(nmethod* nm) {
  return (guard_value(nm) & not_entrant) != 0;
}