1 /*
  2  * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "code/codeCache.hpp"
 27 #include "code/nmethod.hpp"
 28 #include "gc/shared/barrierSet.hpp"
 29 #include "gc/shared/barrierSetNMethod.hpp"
 30 #include "logging/log.hpp"
 31 #include "oops/access.inline.hpp"
 32 #include "runtime/thread.hpp"
 33 #include "runtime/threadWXSetters.inline.hpp"
 34 #include "utilities/debug.hpp"
 35 
 36 class LoadPhantomOopClosure : public OopClosure {
 37 public:
 38   virtual void do_oop(oop* p) {
 39     NativeAccess<ON_PHANTOM_OOP_REF>::oop_load(p);
 40   }
 41   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 42 };
 43 
 44 int BarrierSetNMethod::disarmed_value() const {
 45   return *disarmed_value_address();
 46 }
 47 
 48 bool BarrierSetNMethod::supports_entry_barrier(nmethod* nm) {
 49   if (nm->method()->is_method_handle_intrinsic()) {
 50     return false;
 51   }
 52 
 53   if (nm->method()->is_continuation_enter_intrinsic()) {
 54     return false;
 55   }
 56 
 57   if (!nm->is_native_method() && !nm->is_compiled_by_c2() && !nm->is_compiled_by_c1()) {
 58     return false;
 59   }
 60 
 61   return true;
 62 }
 63 
// Runs the entry barrier for nm: heals its embedded oops with phantom loads
// and then disarms it. Always returns true (entry may proceed).
bool BarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
  // NOTE(review): marked before the oops are published via the phantom loads
  // below — presumably so the GC treats this nmethod as possibly referenced
  // from a continuation stack chunk; confirm ordering requirement.
  nm->mark_as_maybe_on_continuation();
  // Phantom-load every embedded oop so the GC load barrier processes them.
  LoadPhantomOopClosure cl;
  nm->oops_do(&cl);
  // Disarm only after the oops have been healed.
  disarm(nm);

  return true;
}
 72 
// Exposes the address of the global phase counter, viewed as an int*.
// NOTE(review): the cast suggests _current_phase is not declared as a plain
// int — confirm against the header before changing this.
int* BarrierSetNMethod::disarmed_value_address() const {
  return (int*) &_current_phase;
}
 76 
 77 ByteSize BarrierSetNMethod::thread_disarmed_offset() const {
 78   return Thread::nmethod_disarmed_offset();
 79 }
 80 
 81 class BarrierSetNMethodArmClosure : public ThreadClosure {
 82 private:
 83   int _disarm_value;
 84 
 85 public:
 86   BarrierSetNMethodArmClosure(int disarm_value) :
 87     _disarm_value(disarm_value) { }
 88 
 89   virtual void do_thread(Thread* thread) {
 90     thread->set_nmethod_disarm_value(_disarm_value);
 91   }
 92 };
 93 
 94 void BarrierSetNMethod::arm_all_nmethods() {
 95   ++_current_phase;
 96   if (_current_phase == 4) {
 97     _current_phase = 1;
 98   }
 99   BarrierSetNMethodArmClosure cl(_current_phase);
100   Threads::threads_do(&cl);
101 }
102 
// Slow-path entry called from the generated nmethod entry barrier stub.
// return_address_ptr points at the stub's saved return address, which lies
// inside the nmethod being entered. Returns 0 if entry may proceed, 1 if the
// nmethod was deoptimized and the caller must re-resolve.
int BarrierSetNMethod::nmethod_stub_entry_barrier(address* return_address_ptr) {
  // Enable WXWrite: the function is called directly from nmethod_entry_barrier
  // stub.
  MACOS_AARCH64_ONLY(ThreadWXEnable wx(WXWrite, Thread::current()));

  // Recover the nmethod from the return address saved by the stub.
  address return_address = *return_address_ptr;
  CodeBlob* cb = CodeCache::find_blob(return_address);
  assert(cb != NULL, "invariant");

  nmethod* nm = cb->as_nmethod();
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();

  // Another thread may have run the barrier and disarmed the nmethod already;
  // nothing left to do in that case.
  if (!bs_nm->is_armed(nm)) {
    return 0;
  }

  // OSR entries go through nmethod_osr_entry_barrier instead.
  assert(!nm->is_osr_method(), "Should not reach here");
  // Called upon first entry after being armed
  bool may_enter = bs_nm->nmethod_entry_barrier(nm);

  // Diagnostic option to force deoptimization 1 in 3 times. It is otherwise
  // a very rare event.
  if (DeoptimizeNMethodBarriersALot) {
    static volatile uint32_t counter=0;
    if (Atomic::add(&counter, 1u) % 3 == 0) {
      may_enter = false;
    }
  }

  if (!may_enter) {
    log_trace(nmethod, barrier)("Deoptimizing nmethod: " PTR_FORMAT, p2i(nm));
    bs_nm->deoptimize(nm, return_address_ptr);
  }
  return may_enter ? 0 : 1;
}
138 
139 bool BarrierSetNMethod::nmethod_osr_entry_barrier(nmethod* nm) {
140   // This check depends on the invariant that all nmethods that are deoptimized / made not entrant
141   // are NOT disarmed.
142   // This invariant is important because a method can be deoptimized after the method have been
143   // resolved / looked up by OSR by another thread. By not deoptimizing them we guarantee that
144   // a deoptimized method will always hit the barrier and come to the same conclusion - deoptimize
145   if (!is_armed(nm)) {
146     return true;
147   }
148 
149   assert(nm->is_osr_method(), "Should not reach here");
150   log_trace(nmethod, barrier)("Running osr nmethod entry barrier: " PTR_FORMAT, p2i(nm));
151   return nmethod_entry_barrier(nm);
152 }
--- EOF ---