/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/method.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "utilities/debug.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

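// The guard value an nmethod carries when it is disarmed. It mirrors the
// current global GC phase (see disarmed_guard_value_address()).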
int BarrierSetNMethod::disarmed_guard_value() const {
  return *disarmed_guard_value_address();
}

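// Entry barriers are supported for native, C1, C2 and JVMCI nmethods, but not
// for method handle intrinsics or the continuation enter/yield intrinsics.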
bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) {
  if (nm->method()->is_method_handle_intrinsic()) {
    return false;
  }

  if (nm->method()->is_continuation_enter_intrinsic()) {
    return false;
  }

  if (nm->method()->is_continuation_yield_intrinsic()) {
    return false;
  }

  if (nm->method()->is_continuation_native_intrinsic()) {
    guarantee(false, "Unknown Continuation native intrinsic");
    return false;
  }

  if (nm->is_native_method() || nm->is_compiled_by_c2() || nm->is_compiled_by_c1() || nm->is_compiled_by_jvmci()) {
    return true;
  }

  return false;
}

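// Disarm by installing the guard value for the current GC phase; the entry
// barrier fast path then succeeds until the next call to arm_all_nmethods().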
void BarrierSetNMethod::disarm(nmethod* nm) {
  guard_with(nm, disarmed_guard_value());
}

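// Install an explicit guard value, preserving the sticky not_entrant bit.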
void BarrierSetNMethod::guard_with(nmethod* nm, int value) {
  assert((value & not_entrant) == 0, "not_entrant bit is reserved");
  // Enter critical section.  Does not block for safepoint.
  ConditionalMutexLocker ml(NMethodEntryBarrier_lock, !NMethodEntryBarrier_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
  // Do not undo sticky bit
  if (is_not_entrant(nm)) {
    value |= not_entrant;
  }
  if (guard_value(nm) != value) {
    // Patch the code only if needed.
    set_guard_value(nm, value);
  }
}

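// Armed means the guard value, ignoring the sticky not_entrant bit, does not
// match the current disarmed value.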
bool BarrierSetNMethod::is_armed(nmethod* nm) {
  return (guard_value(nm) & ~not_entrant) != disarmed_guard_value();
}

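// Slow path of the entry barrier: heal (keep alive) the nmethod's oops, mark
// the nmethod as maybe-on-stack for code cache unloading, and disarm it.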
bool BarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
  class OopKeepAliveClosure : public OopClosure {
  public:
    virtual void do_oop(oop* p) {
      // Loads on nmethod oops are phantom strength.
      //
      // Note that we could have used NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(p),
      // but that would have *required* us to convert the returned LoadOopProxy to an oop,
      // or else the keep alive load barrier will never be called. It's the LoadOopProxy-to-oop
      // conversion that performs the load barriers. This is too subtle, so we instead
      // perform an explicit keep alive call.
      oop obj = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(p);
      if (obj != nullptr) {
        Universe::heap()->keep_alive(obj);
      }
    }

    virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };

  if (!is_armed(nm)) {
    // Some other thread got here first and healed the oops
    // and disarmed the nmethod. No need to continue.
    return true;
  }

  // If the nmethod is the only thing pointing to the oops, and we are using a
  // SATB GC, then it is important that this code marks them live.
  // Also, with concurrent GC, it is possible that frames in continuation stack
  // chunks are not visited if they are allocated after concurrent GC started.
  OopKeepAliveClosure cl;
  nm->oops_do(&cl);

  // CodeCache unloading support
  nm->mark_as_maybe_on_stack();

  disarm(nm);

  return true;
}

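// By default the disarmed value is the global GC phase counter, advanced by
// arm_all_nmethods().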
int* BarrierSetNMethod::disarmed_guard_value_address() const {
  return (int*) &_current_phase;
}

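// Offset of the thread-local disarmed guard value that the compiled entry
// barrier compares the nmethod guard against.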
ByteSize BarrierSetNMethod::thread_disarmed_guard_value_offset() const {
  return Thread::nmethod_disarmed_guard_value_offset();
}

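// Publishes the new disarmed guard value to every thread's thread-local copy.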
class BarrierSetNMethodArmClosure : public ThreadClosure {
private:
  int _disarmed_guard_value;

public:
  BarrierSetNMethodArmClosure(int disarmed_guard_value) :
      _disarmed_guard_value(disarmed_guard_value) {}

  virtual void do_thread(Thread* thread) {
    thread->set_nmethod_disarmed_guard_value(_disarmed_guard_value);
  }
};

void BarrierSetNMethod::arm_all_nmethods() {
  // Change to a new global GC phase. Doing this requires changing the thread-local
  // disarm value for all threads, to reflect the new GC phase.
  // We wrap around at INT_MAX. That means that we assume nmethods won't have ABA
  // problems in their nmethod disarm values after INT_MAX - 1 GCs. Every time a GC
  // completes, ABA problems are removed, but if a concurrent GC is started and then
  // aborted N times, that is when there could be ABA problems. If anything close to
  // INT_MAX - 1 GCs start without being able to finish, something is seriously wrong.
  ++_current_phase;
  if (_current_phase == INT_MAX) {
    _current_phase = initial;
  }
  BarrierSetNMethodArmClosure cl(_current_phase);
  Threads::threads_do(&cl);

#if (defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
  // We clear the patching epoch when disarming nmethods, so that
  // the counter won't overflow.
  BarrierSetAssembler::clear_patching_epoch();
#endif
}

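// Runtime entry point, called from the nmethod entry barrier stub. Returns 0
// if the caller may enter the nmethod and 1 if it must back out because the
// nmethod was deoptimized.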
int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
  // Enable WXWrite: the function is called directly from the nmethod_entry_barrier
  // stub.
  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));

  address return_address = *return_address_ptr;
  AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
  CodeBlob* cb = CodeCache::find_blob(return_address);
  assert(cb != nullptr, "invariant");

  nmethod* nm = cb->as_nmethod();
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  log_trace(nmethod, barrier)("Running nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
  // Called upon first entry after being armed
  bool may_enter = !bs_nm->is_not_entrant(nm) && bs_nm->nmethod_entry_barrier(nm);
  assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");

  if (may_enter) {
    // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
    // are made visible by using a cross modify fence. Note that this is synchronous cross modifying
    // code, where the existence of new instructions is communicated via data (the guard value).
    // This cross modify fence is only needed when the nmethod entry barrier modifies the
    // instructions. Not all platforms currently do that, so if this check becomes expensive,
    // it can be made conditional on the nmethod_patching_type.
    OrderAccess::cross_modify_fence();

    // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
    // a very rare event.
    if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
      static volatile uint32_t counter = 0;
      if (Atomic::add(&counter, 1u) % 10 == 0) {
        may_enter = false;
      }
    }
  }

  if (may_enter) {
    nm->set_used();
  } else {
    log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
    bs_nm->deoptimize(nm, return_address_ptr);
  }
  return may_enter ? 0 : 1;
}

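// OSR nmethods do not go through the compiled entry barrier stub, so the
// runtime runs the barrier explicitly before entering them.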
bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
  assert(nm->is_osr_method(), "should be called only for OSR nmethods");
  log_trace(nmethod, barrier)("Running osr nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
  bool result = nmethod_entry_barrier(nm);
  if (result) {
    nm->set_used();
  }
  OrderAccess::cross_modify_fence();
  return result;
}

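// Load the oop at the given index from the nmethod without applying a
// keep-alive barrier.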
oop BarrierSetNMethod::oop_load_no_keepalive(const nmethod* nm, int index) {
  return NativeAccess<AS_NO_KEEPALIVE>::oop_load(nm->oop_addr_at(index));
}

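// Load the oop at the given index with phantom strength, keeping it alive.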
oop BarrierSetNMethod::oop_load_phantom(const nmethod* nm, int index) {
  return NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(nm->oop_addr_at(index));
}

// Make the nmethod permanently not-entrant, so that nmethod_stub_entry_barrier() will call
// deoptimize() to redirect the caller to SharedRuntime::get_handle_wrong_method_stub().
// A sticky armed bit is set and other bits are preserved.  As a result, a call to
// nmethod_stub_entry_barrier() may appear to be spurious, because is_armed() still returns
// false and nmethod_entry_barrier() is not called.
void BarrierSetNMethod::make_not_entrant(nmethod* nm) {
  // Enter critical section.  Does not block for safepoint.
  ConditionalMutexLocker ml(NMethodEntryBarrier_lock, !NMethodEntryBarrier_lock->owned_by_self(), Mutex::_no_safepoint_check_flag);
  int value = guard_value(nm) | not_entrant;
  if (guard_value(nm) != value) {
    // Patch the code only if needed.
    set_guard_value(nm, value);
  }
}

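// Test the sticky not_entrant bit set by make_not_entrant().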
bool BarrierSetNMethod::is_not_entrant(nmethod* nm) {
  return (guard_value(nm) & not_entrant) != 0;
}