/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/method.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/debug.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

int BarrierSetNMethod::disarmed_guard_value() const {
  return *disarmed_guard_value_address();
}

bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) {
  if (nm->method()->is_method_handle_intrinsic()) {
    return false;
  }

  if (nm->method()->is_continuation_enter_intrinsic()) {
    return false;
  }

  if (nm->method()->is_continuation_yield_intrinsic()) {
    return false;
  }

  if (nm->method()->is_continuation_native_intrinsic()) {
    guarantee(false, "Unknown Continuation native intrinsic");
    return false;
  }

  if (nm->is_native_method() || nm->is_compiled_by_c2() || nm->is_compiled_by_c1() || nm->is_compiled_by_jvmci()) {
    return true;
  }

  return false;
}

void BarrierSetNMethod::disarm(nmethod* nm) {
  set_guard_value(nm, disarmed_guard_value());
}

bool BarrierSetNMethod::is_armed(nmethod* nm) {
  return guard_value(nm) != disarmed_guard_value();
}

bool BarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
  class OopKeepAliveClosure : public OopClosure {
  public:
    virtual void do_oop(oop* p) {
      // Loads on nmethod oops are phantom strength.
      //
      // Note that we could have used NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(p),
      // but that would have *required* us to convert the returned LoadOopProxy to an oop,
      // or else the keep-alive load barrier would never be called. It's the LoadOopProxy-to-oop
      // conversion that performs the load barriers. This is too subtle, so we instead
      // perform an explicit keep alive call.
      oop obj = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(p);
      if (obj != nullptr) {
        Universe::heap()->keep_alive(obj);
      }
    }

    virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };

  if (!is_armed(nm)) {
    // Some other thread got here first and healed the oops
    // and disarmed the nmethod. No need to continue.
    return true;
  }

  // If the nmethod is the only thing pointing to the oops, and we are using a
  // SATB GC, then it is important that this code marks them live.
  // Also, with concurrent GC, it is possible that frames in continuation stack
  // chunks are not visited if they are allocated after concurrent GC started.
  OopKeepAliveClosure cl;
  nm->oops_do(&cl);

  // CodeCache unloading support
  nm->mark_as_maybe_on_stack();

  disarm(nm);

  return true;
}

int* BarrierSetNMethod::disarmed_guard_value_address() const {
  return (int*) &_current_phase;
}

ByteSize BarrierSetNMethod::thread_disarmed_guard_value_offset() const {
  return Thread::nmethod_disarmed_guard_value_offset();
}

class BarrierSetNMethodArmClosure : public ThreadClosure {
private:
  int _disarmed_guard_value;

public:
  BarrierSetNMethodArmClosure(int disarmed_guard_value) :
      _disarmed_guard_value(disarmed_guard_value) {}

  virtual void do_thread(Thread* thread) {
    thread->set_nmethod_disarmed_guard_value(_disarmed_guard_value);
  }
};

void BarrierSetNMethod::arm_all_nmethods() {
  // Change to a new global GC phase. Doing this requires changing the thread-local
  // disarm value for all threads, to reflect the new GC phase.
  // We wrap around at INT_MAX. That means that we assume nmethods won't have ABA
  // problems in their nmethod disarm values after INT_MAX - 1 GCs. Every time a GC
  // completes, ABA problems are removed, but if a concurrent GC is started and then
  // aborted N times, that is when there could be ABA problems. If there is anything
  // close to INT_MAX - 1 GCs starting without being able to finish, something is
  // seriously wrong.
  ++_current_phase;
  if (_current_phase == INT_MAX) {
    _current_phase = 1;
  }
  BarrierSetNMethodArmClosure cl(_current_phase);
  Threads::threads_do(&cl);

#if (defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
  // We clear the patching epoch when disarming nmethods, so that
  // the counter won't overflow.
  BarrierSetAssembler::clear_patching_epoch();
#endif
}

int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
  // Enable WXWrite: the function is called directly from the nmethod_entry_barrier
  // stub.
  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));

  address return_address = *return_address_ptr;
  AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
  CodeBlob* cb = CodeCache::find_blob(return_address);
  assert(cb != nullptr, "invariant");

  nmethod* nm = cb->as_nmethod();
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  // Called upon first entry after being armed
  bool may_enter = bs_nm->nmethod_entry_barrier(nm);
  assert(!nm->is_osr_method() || may_enter, "OSR nmethods should always be entrant after migration");

  // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
  // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
  // code, where the existence of new instructions is communicated via data (the guard value).
  // This cross modify fence is only needed when the nmethod entry barrier modifies the
  // instructions. Not all platforms currently do that, so if this check becomes expensive,
  // it can be made conditional on the nmethod_patching_type.
  OrderAccess::cross_modify_fence();

  // Diagnostic option to force deoptimization 1 in 10 times. It is otherwise
  // a very rare event.
  if (DeoptimizeNMethodBarriersALot && !nm->is_osr_method()) {
    static volatile uint32_t counter = 0;
    if (Atomic::add(&counter, 1u) % 10 == 0) {
      may_enter = false;
    }
  }

  if (!may_enter) {
    log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
    bs_nm->deoptimize(nm, return_address_ptr);
  }
  return may_enter ? 0 : 1;
}

bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
  assert(nm->is_osr_method(), "Should not reach here");
  log_trace(nmethod, barrier)("Running osr nmethod entry barrier: " PTR_FORMAT, p2i(nm));
  bool result = nmethod_entry_barrier(nm);
  OrderAccess::cross_modify_fence();
  return result;
}