/*
 * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
#define SHARE_VM_RUNTIME_SYNCHRONIZER_HPP

#include "oops/markOop.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/handles.hpp"
#include "runtime/perfData.hpp"
#include "utilities/top.hpp"


class ObjectMonitor;

class ObjectSynchronizer : AllStatic {
  friend class VMStructs;
#if INCLUDE_ALL_GCS
  friend class ShenandoahSynchronizerIterator;
#endif

 public:
  typedef enum {
    owner_self,
    owner_none,
    owner_other
  } LockOwnership;

  typedef enum {
    inflate_cause_vm_internal = 0,
    inflate_cause_monitor_enter = 1,
    inflate_cause_wait = 2,
    inflate_cause_notify = 3,
    inflate_cause_hash_code = 4,
    inflate_cause_jni_enter = 5,
    inflate_cause_jni_exit = 6,
    inflate_cause_nof = 7 // Number of causes
  } InflateCause;

  // exit must be implemented non-blocking, since the compiler cannot easily handle
  // deoptimization at monitor exit. Hence, it does not take a Handle argument.

  // These are the full versions of monitor enter and exit. We chose not to
  // name them enter() and exit() in order to make sure the user is aware of
  // the performance and semantic differences. They are normally used by
  // ObjectLocker etc. The interpreter and compiler use assembly copies of
  // these routines. Please keep them synchronized.
  //
  // The attempt_rebias flag is used by the UseBiasedLocking implementation.
  static void fast_enter(Handle obj, BasicLock* lock, bool attempt_rebias, TRAPS);
  static void fast_exit (oop obj, BasicLock* lock, Thread* THREAD);

  // WARNING: These are ONLY used to handle the slow cases. They should
  // only be used when the fast cases have failed. Use of these functions
  // without a previous fast-case check may cause a fatal error.
  static void slow_enter(Handle obj, BasicLock* lock, TRAPS);
  static void slow_exit (oop obj, BasicLock* lock, Thread* THREAD);

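  // A minimal usage sketch of the fast_enter()/fast_exit() pair above, in the
  // spirit of what ObjectLocker (declared at the end of this file) does. The
  // caller and the handle name h_obj are hypothetical; the BasicLock must
  // live on the caller's stack for the whole critical section because it
  // holds the displaced mark word:
  //
  //   BasicLock lock;
  //   ObjectSynchronizer::fast_enter(h_obj, &lock, /*attempt_rebias*/ false, CHECK);
  //   ... guarded work ...
  //   ObjectSynchronizer::fast_exit(h_obj(), &lock, THREAD);
  //
  // slow_enter()/slow_exit() take the same lock argument but are intended
  // only for the cases where the corresponding fast path has failed.
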
  // Used only to handle JNI locks or other unmatched monitor enter/exit.
  // Internally they will use the heavy-weight monitor.
  static void jni_enter    (Handle obj, TRAPS);
  static bool jni_try_enter(Handle obj, Thread* THREAD);  // Implements Unsafe.tryMonitorEnter
  static void jni_exit     (oop obj, Thread* THREAD);

  // Handle all interpreter, compiler and JNI cases
  static void wait     (Handle obj, jlong millis, TRAPS);
  static void notify   (Handle obj, TRAPS);
  static void notifyall(Handle obj, TRAPS);

  // Special internal-use-only method for use by JVM infrastructure
  // that needs to wait() on a java-level object but that can't risk
  // throwing unexpected InterruptedExceptions.
  static void waitUninterruptibly(Handle obj, jlong Millis, Thread* THREAD);

  // Used by class loading to release the class loader object lock,
  // wait on an internal lock, and then reclaim the original lock
  // with the original recursion count.
  static intptr_t complete_exit(Handle obj, TRAPS);
  static void     reenter      (Handle obj, intptr_t recursion, TRAPS);

  // thread-specific and global objectMonitor free list accessors
  // static void verifyInUse(Thread* Self);  // too slow for general assert/debug
  static ObjectMonitor* omAlloc  (Thread* Self);
  static void           omRelease(Thread* Self, ObjectMonitor* m, bool FromPerThreadAlloc);
  static void           omFlush  (Thread* Self);

  // Inflate a light-weight monitor to a heavy-weight monitor
  static ObjectMonitor* inflate(Thread* Self, oop obj, const InflateCause cause);
  // This version is only for internal use
  static ObjectMonitor* inflate_helper(oop obj);
  static const char* inflate_cause_name(const InflateCause cause);

  // Returns the identity hash value for an oop
  // NOTE: It may cause monitor inflation
  static intptr_t identity_hash_value_for(Handle obj);
  static intptr_t FastHashCode(Thread* Self, oop obj);

  // java.lang.Thread support
  static bool current_thread_holds_lock(JavaThread* thread, Handle h_obj);
  static LockOwnership query_lock_ownership(JavaThread* self, Handle h_obj);

  static JavaThread* get_lock_owner(Handle h_obj, bool doLock);

  // JNI detach support
  static void release_monitors_owned_by_thread(TRAPS);
  static void monitors_iterate(MonitorClosure* m);

  // GC: we currently use an aggressive monitor deflation policy;
  // basically we deflate all monitors that are not busy.
  // An adaptive profile-based deflation policy could be used if needed.
  static void deflate_idle_monitors();
  static int  walk_monitor_list(ObjectMonitor** listheadp,
                                ObjectMonitor** FreeHeadp,
                                ObjectMonitor** FreeTailp);
  static bool deflate_monitor(ObjectMonitor* mid, oop obj, ObjectMonitor** FreeHeadp,
                              ObjectMonitor** FreeTailp);
  static void oops_do(OopClosure* f);

  // debugging
  static void sanity_checks(const bool verbose,
                            const unsigned int cache_line_size,
                            int* error_cnt_ptr, int* warning_cnt_ptr);
  static void verify() PRODUCT_RETURN;
  static int  verify_objmon_isinpool(ObjectMonitor* addr) PRODUCT_RETURN0;

  static void RegisterSpinCallback(int (*)(intptr_t, int), intptr_t);

 private:
  enum { _BLOCKSIZE = 128 };
  static ObjectMonitor* volatile gBlockList;
  static ObjectMonitor* volatile gFreeList;
  static ObjectMonitor* volatile gOmInUseList; // for moribund threads, so the monitors they inflated still get scanned
  static int gOmInUseCount;

};

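// A minimal sketch of how the JVM_MonitorWait/JVM_MonitorNotifyAll style
// entry points are expected to funnel into ObjectSynchronizer (hypothetical
// caller; the real ones live in jvm.cpp and resolve the incoming jobject to
// a Handle first; obj and millis are illustrative names):
//
//   Handle h_obj(THREAD, JNIHandles::resolve_non_null(obj));
//   ObjectSynchronizer::wait(h_obj, millis, CHECK);   // may inflate to a heavy-weight monitor
//   ObjectSynchronizer::notifyall(h_obj, CHECK);
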
// ObjectLocker enforces balanced locking and can never throw an
// IllegalMonitorStateException. However, a pending exception may
// have to pass through, and we must also be able to deal with
// asynchronous exceptions. The caller is responsible for checking
// the thread's pending exception if needed.
// doLock was added to support class loading with UnsyncloadClass, which
// requires a flag-based choice of locking the class loader lock.
// A usage sketch follows at the end of this file.
class ObjectLocker : public StackObj {
 private:
  Thread*   _thread;
  Handle    _obj;
  BasicLock _lock;
  bool      _dolock;  // default true
 public:
  ObjectLocker(Handle obj, Thread* thread, bool doLock = true);
  ~ObjectLocker();

  // Monitor behavior
  void wait      (TRAPS) { ObjectSynchronizer::wait(_obj, 0, CHECK); }  // wait forever
  void notify_all(TRAPS) { ObjectSynchronizer::notifyall(_obj, CHECK); }
  void waitUninterruptibly(TRAPS) { ObjectSynchronizer::waitUninterruptibly(_obj, 0, CHECK); }
  // complete_exit gives up the lock completely, returning the recursion count
  // reenter reclaims the lock with the original recursion count
  intptr_t complete_exit(TRAPS) { return ObjectSynchronizer::complete_exit(_obj, THREAD); }
  void reenter(intptr_t recursion, TRAPS) { ObjectSynchronizer::reenter(_obj, recursion, CHECK); }
};

#endif // SHARE_VM_RUNTIME_SYNCHRONIZER_HPP
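
// A minimal usage sketch for ObjectLocker (hypothetical VM code path; the
// handle name h_loader_lock is illustrative). The constructor enters the
// monitor via ObjectSynchronizer::fast_enter() and the destructor exits it,
// so locking stays balanced even when an exception is pending:
//
//   {
//     ObjectLocker ol(h_loader_lock, THREAD);  // acquire
//     ... work done while the lock is held ...
//     ol.notify_all(CHECK);                    // optional monitor operations
//   }                                          // destructor releases the lock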