/*
 * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/method.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/threadWXSetters.inline.hpp"
#include "runtime/threads.hpp"
#include "utilities/debug.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
#endif

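// The disarmed guard value is the value an nmethod's guard must hold for the
// nmethod to be considered disarmed; it tracks the current global GC phase.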
int BarrierSetNMethod::disarmed_guard_value() const {
  return *disarmed_guard_value_address();
}

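// Entry barriers are not supported for method handle intrinsics and the
// continuation enter/yield intrinsics; all other native, C1, C2 and JVMCI
// compiled nmethods get them.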
bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) {
  if (nm->method()->is_method_handle_intrinsic()) {
    return false;
  }

  if (nm->method()->is_continuation_enter_intrinsic()) {
    return false;
  }

  if (nm->method()->is_continuation_yield_intrinsic()) {
    return false;
  }

  if (nm->method()->is_continuation_native_intrinsic()) {
    guarantee(false, "Unknown Continuation native intrinsic");
    return false;
  }

  if (nm->is_native_method() || nm->is_compiled_by_c2() || nm->is_compiled_by_c1() || nm->is_compiled_by_jvmci()) {
    return true;
  }

  return false;
}

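// Disarming stores the current disarmed (GC phase) value into the nmethod's guard;
// an nmethod is armed whenever its guard value differs from that global value.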
void BarrierSetNMethod::disarm(nmethod* nm) {
  set_guard_value(nm, disarmed_guard_value());
}

bool BarrierSetNMethod::is_armed(nmethod* nm) {
  return guard_value(nm) != disarmed_guard_value();
}

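// Slow path shared by the entry barriers: keep the nmethod's oops alive, mark the
// nmethod as possibly being on-stack for code cache unloading, and disarm it.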
bool BarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
  class OopKeepAliveClosure : public OopClosure {
  public:
    virtual void do_oop(oop* p) {
      // Loads on nmethod oops are phantom strength.
      //
      // Note that we could have used NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(p),
      // but that would have *required* us to convert the returned LoadOopProxy to an oop,
      // or else the keep-alive load barrier would never be called. It is the LoadOopProxy-to-oop
      // conversion that performs the load barriers. This is too subtle, so we instead
      // perform an explicit keep-alive call.
      oop obj = NativeAccess<ON_PHANTOM_OOP_REF | AS_NO_KEEPALIVE>::oop_load(p);
      if (obj != nullptr) {
        Universe::heap()->keep_alive(obj);
      }
    }

    virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
  };

  // If the nmethod is the only thing pointing to the oops, and we are using a
  // SATB GC, then it is important that this code marks them live.
  // Also, with concurrent GC, it is possible that frames in continuation stack
  // chunks are not visited if they are allocated after concurrent GC started.
  OopKeepAliveClosure cl;
  nm->oops_do(&cl);

  // CodeCache unloading support
  nm->mark_as_maybe_on_stack();

  disarm(nm);

  return true;
}

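// The global disarmed value lives in _current_phase; the entry barrier stubs compare
// an nmethod's guard against this value via the thread-local copy exposed below.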
int* BarrierSetNMethod::disarmed_guard_value_address() const {
  return (int*) &_current_phase;
}

ByteSize BarrierSetNMethod::thread_disarmed_guard_value_offset() const {
  return Thread::nmethod_disarmed_guard_value_offset();
}

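// Pushes the new disarmed guard value into each thread's thread-local copy, so that
// nmethods disarmed in the previous phase are once again considered armed.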
class BarrierSetNMethodArmClosure : public ThreadClosure {
private:
  int _disarmed_guard_value;

public:
  BarrierSetNMethodArmClosure(int disarmed_guard_value) :
      _disarmed_guard_value(disarmed_guard_value) {}

  virtual void do_thread(Thread* thread) {
    thread->set_nmethod_disarmed_guard_value(_disarmed_guard_value);
  }
};

void BarrierSetNMethod::arm_all_nmethods() {
  // Change to a new global GC phase. Doing this requires changing the thread-local
  // disarm value for all threads, to reflect the new GC phase.
  // We wrap around at INT_MAX. That means that we assume nmethods won't have ABA
  // problems in their nmethod disarm values after INT_MAX - 1 GCs. Every time a GC
  // completes, ABA problems are removed, but ABA problems could arise if a concurrent
  // GC is started and then aborted repeatedly. If anything close to INT_MAX - 1 GCs
  // start without being able to finish, something is seriously wrong.
  ++_current_phase;
  if (_current_phase == INT_MAX) {
    _current_phase = 1;
  }
  BarrierSetNMethodArmClosure cl(_current_phase);
  Threads::threads_do(&cl);

#if (defined(AARCH64) || defined(RISCV64)) && !defined(ZERO)
  // We clear the patching epoch when disarming nmethods, so that
  // the counter won't overflow.
  BarrierSetAssembler::clear_patching_epoch();
#endif
}

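// Runtime entry called from the nmethod entry barrier stub when the guard check fails.
// Returns 0 if the nmethod may be entered and 1 if it was deoptimized instead.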
int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
  // Enable WXWrite: this function is called directly from the nmethod_entry_barrier
  // stub.
  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));

  address return_address = *return_address_ptr;
  AARCH64_PORT_ONLY(return_address = pauth_strip_pointer(return_address));
  CodeBlob* cb = CodeCache::find_blob(return_address);
  assert(cb != nullptr, "invariant");

  nmethod* nm = cb->as_nmethod();
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  if (!bs_nm->is_armed(nm)) {
    return 0;
  }

  assert(!nm->is_osr_method(), "Should not reach here");
  log_trace(nmethod, barrier)("Running nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
  // Called upon first entry after being armed
  bool may_enter = bs_nm->nmethod_entry_barrier(nm);

  // In case a concurrent thread disarmed the nmethod, we need to ensure the new instructions
  // are made visible, by using a cross modify fence. Note that this is synchronous cross modifying
  // code, where the existence of new instructions is communicated via data (the guard value).
  // This cross modify fence is only needed when the nmethod entry barrier modifies the
  // instructions. Not all platforms currently do that, so if this check becomes expensive,
  // it can be made conditional on the nmethod_patching_type.
  OrderAccess::cross_modify_fence();

  // Diagnostic option to force deoptimization 1 in 3 times. It is otherwise
  // a very rare event.
  if (DeoptimizeNMethodBarriersALot) {
    static volatile uint32_t counter = 0;
    if (Atomic::add(&counter, 1u) % 3 == 0) {
      may_enter = false;
    }
  }

  if (may_enter) {
    nm->set_used();
  } else {
    log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
    bs_nm->deoptimize(nm, return_address_ptr);
  }
  return may_enter ? 0 : 1;
}

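// Entry barrier check for OSR nmethods; runs the same nmethod_entry_barrier and
// returns true if the nmethod may be entered.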
bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
  // This check depends on the invariant that all nmethods that are deoptimized / made not entrant
  // are NOT disarmed.
  // This invariant is important because a method can be deoptimized by another thread after it has
  // been resolved / looked up for OSR. Because such nmethods stay armed, a deoptimized method will
  // always hit the barrier and come to the same conclusion - deoptimize.
  if (!is_armed(nm)) {
    return true;
  }

  assert(nm->is_osr_method(), "Should not reach here");
  log_trace(nmethod, barrier)("Running osr nmethod entry barrier: %d " PTR_FORMAT, nm->compile_id(), p2i(nm));
  bool result = nmethod_entry_barrier(nm);
  if (result) {
    nm->set_used();
  }
  OrderAccess::cross_modify_fence();
  return result;
}