/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_GCLOCKER_HPP
#define SHARE_GC_SHARED_GCLOCKER_HPP

#include "gc/shared/gcCause.hpp"
#include "memory/allocation.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

class JavaThread;

// The direct lock/unlock calls do not force a collection if an unlock
// decrements the count to zero. Avoid calling these if at all possible.

class GCLocker: public AllStatic {
 private:
  // The _jni_lock_count keeps track of the number of threads that are
  // currently in a critical region.  It's only kept up to date when
  // _needs_gc is true.  The current value is computed during
  // safepointing and decremented during the slow path of GCLocker
  // unlocking.
  static volatile jint _jni_lock_count;  // number of jni active instances.
  static volatile bool _needs_gc;        // heap is filling, we need a GC
                                         // note: bool is typedef'd as jint
  static volatile bool _doing_gc;        // unlock_critical() is doing a GC
  static uint _total_collections;        // value for _gc_locker collection

#ifdef ASSERT
  // This lock count is updated for all operations and is used to
  // validate the jni_lock_count that is computed during safepoints.
  static volatile jint _debug_jni_lock_count;
#endif

  // At a safepoint, visit all threads and count the number of active
  // critical sections.  This is used to validate the value of
  // _jni_lock_count that is computed during safepointing.
  static void verify_critical_count() NOT_DEBUG_RETURN;

  static void jni_lock(JavaThread* thread);
  static void jni_unlock(JavaThread* thread);

  static bool is_active_internal() {
    verify_critical_count();
    return _jni_lock_count > 0;
  }

  static void log_debug_jni(const char* msg);

  static bool is_at_safepoint();

 public:
  // Accessors
  static bool is_active() {
    assert(GCLocker::is_at_safepoint(), "only read at safepoint");
    return is_active_internal();
  }
  static bool needs_gc()       { return _needs_gc;                        }

  // Shorthand
  static bool is_active_and_needs_gc() {
    // Use is_active_internal since _needs_gc can change from true to
    // false outside of a safepoint, triggering the assert in
    // is_active.
    return needs_gc() && is_active_internal();
  }

  // In debug mode track the locking state at all times
  static void increment_debug_jni_lock_count() NOT_DEBUG_RETURN;
  static void decrement_debug_jni_lock_count() NOT_DEBUG_RETURN;

  // Set the current lock count
  static void set_jni_lock_count(int count) {
    _jni_lock_count = count;
    verify_critical_count();
  }

  // Sets _needs_gc if is_active() is true. Returns is_active().
  static bool check_active_before_gc();

  // Returns true if the designated collection is a GCLocker request
  // that should be discarded, i.e. cause == GCCause::_gc_locker and
  // the given total collection value indicates a collection has been
  // done since the GCLocker request was made.
  static bool should_discard(GCCause::Cause cause, uint total_collections);
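  //
  // A minimal, illustrative sketch (not code from this file; the collector
  // entry point and the gc_count_before variable are assumptions) of how a
  // collector might consult should_discard() before honoring a
  // GCCause::_gc_locker induced collection request:
  //
  //   if (GCLocker::should_discard(cause, gc_count_before)) {
  //     // A collection already completed since the GCLocker request was
  //     // made, so this request is stale and can be dropped.
  //     return;
  //   }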

  // Stalls the caller (who should not be in a jni critical section)
  // until needs_gc() clears. Note however that needs_gc() may be
  // set at a subsequent safepoint and/or cleared under the
  // JNICritical_lock, so the caller may not safely assert upon
  // return from this method that "!needs_gc()" since that is
  // not a stable predicate.
  static void stall_until_clear();
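  //
  // A hedged usage sketch (the surrounding allocation routine and the
  // result/attempt_allocation names are hypothetical): an allocation slow
  // path that failed while the GCLocker was active typically waits here
  // and retries, rather than reporting an OutOfMemoryError immediately:
  //
  //   if (GCLocker::is_active_and_needs_gc()) {
  //     GCLocker::stall_until_clear();      // wait for critical regions to drain
  //     result = attempt_allocation(size);  // retry after the GCLocker-induced GC
  //   }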

  // The following two methods are used for JNI critical regions.
  // If we find that we failed to perform a GC because the GCLocker
  // was active, arrange for one as soon as possible by allowing
  // all threads in critical regions to complete, but not allowing
  // other critical regions to be entered. The reasons for that are:
  // 1) a GC request won't be starved by overlapping JNI critical
  //    region activities, which can cause unnecessary OutOfMemoryErrors.
  // 2) even if allocation requests can still be satisfied before the GC
  //    locker becomes inactive, for example, in the tenured generation
  //    possibly with heap expansion, those allocations can trigger lots
  //    of safepointing attempts (ineffective GC attempts) and require the
  //    Heap_lock, which slows down allocations tremendously.
  //
  // Note that critical regions can be nested in a single thread, so
  // we must allow threads already in critical regions to continue.
  //
  // JNI critical regions are the only participants in this scheme
  // because they are, by spec, well bounded while in a critical region.
  //
  // Each of the following two methods is split into a fast path and a
  // slow path. JNICritical_lock is only grabbed in the slow path.
  // _needs_gc is initially false and every java thread will go
  // through the fast path, which simply increments or decrements the
  // current thread's critical count.  When a GC happens at a safepoint,
  // GCLocker::is_active() is checked. Since there is no safepoint in
  // the fast path of lock_critical() and unlock_critical(), there is
  // no race condition between the fast path and the GC. After _needs_gc
  // is set at a safepoint, every thread will go through the slow path
  // after the safepoint.  This is because, after the safepoint, each of
  // the following two methods is either entered from the method entry
  // and falls into the slow path, or is resumed from a safepoint within
  // the method, and such safepoints exist only in the slow path. So once
  // _needs_gc is set, the slow path is always taken until _needs_gc is
  // cleared.  A usage sketch is given after the declarations below.
  inline static void lock_critical(JavaThread* thread);
  inline static void unlock_critical(JavaThread* thread);
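  //
  // A minimal, illustrative sketch (the native entry point and the array
  // access are assumptions, not code from this file): a JNI
  // GetPrimitiveArrayCritical style operation brackets its direct access
  // to the array body with lock_critical()/unlock_critical() on the
  // current JavaThread:
  //
  //   JavaThread* thread = JavaThread::current();
  //   GCLocker::lock_critical(thread);    // fast path unless _needs_gc is set
  //   // ... access the raw array contents; no safepoint may occur here ...
  //   GCLocker::unlock_critical(thread);  // slow path may initiate the pending GC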

  static address needs_gc_address() { return (address) &_needs_gc; }
};

#endif // SHARE_GC_SHARED_GCLOCKER_HPP