1 /* 2 * Copyright (c) 2019, Red Hat, Inc. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 27 #include "runtime/os.hpp" 28 29 #include "gc/shenandoah/shenandoahLock.hpp" 30 #include "runtime/atomic.hpp" 31 #include "runtime/interfaceSupport.inline.hpp" 32 #include "runtime/javaThread.hpp" 33 #include "runtime/os.inline.hpp" 34 35 // These are inline variants of Thread::SpinAcquire with optional blocking in VM. 
// Stand-in for ThreadBlockInVM used when blocking for a safepoint is not
// allowed (non-Java threads, or callers that forbid it). Constructing it is
// a no-op; the assert documents that no JavaThread should be passed here.
class ShenandoahNoBlockOp : public StackObj {
public:
  ShenandoahNoBlockOp(JavaThread* java_thread) {
    assert(java_thread == nullptr, "Should not pass anything");
  }
};

// Slow path taken when the fast-path CAS on _state failed. Java threads that
// are allowed to block for a safepoint spin with a ThreadBlockInVM scope
// (so the VM can reach a safepoint while they wait); all other callers spin
// with the no-op block op.
void ShenandoahLock::contended_lock(bool allow_block_for_safepoint) {
  Thread* thread = Thread::current();
  if (allow_block_for_safepoint && thread->is_Java_thread()) {
    contended_lock_internal<ThreadBlockInVM>(JavaThread::cast(thread));
  } else {
    contended_lock_internal<ShenandoahNoBlockOp>(nullptr);
  }
}

// Spin until the CAS _state: unlocked -> locked succeeds.
// Backoff policy: SpinPause() on most iterations; every 4096th iteration
// (ctr & 0xFFF == 0) enter the BlockOp scope and either yield (first 6
// times) or sleep for 1 ms thereafter, easing CPU pressure on long waits.
// The BlockOp scope is entered only around the yield/sleep, never while
// holding the lock.
template<typename BlockOp>
void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) {
  int ctr = 0;
  int yields = 0;
  while (Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
    if ((++ctr & 0xFFF) == 0) {
      BlockOp block(java_thread);
      if (yields > 5) {
        os::naked_short_sleep(1);
      } else {
        os::naked_yield();
        yields++;
      }
    } else {
      SpinPause();
    }
  }
}

// Simple (non-reentrant) lock backed by the platform mutex in _lock.
// The assert guards against construction before os mutex infrastructure
// is initialized during VM startup.
ShenandoahSimpleLock::ShenandoahSimpleLock() {
  assert(os::mutex_init_done(), "Too early!");
}

void ShenandoahSimpleLock::lock() {
  _lock.lock();
}

void ShenandoahSimpleLock::unlock() {
  _lock.unlock();
}

// Reentrant lock: the owning thread may lock() repeatedly; _count tracks
// the nesting depth and the underlying mutex is released only when the
// count returns to zero.
ShenandoahReentrantLock::ShenandoahReentrantLock() :
  ShenandoahSimpleLock(), _owner(nullptr), _count(0) {
  assert(os::mutex_init_done(), "Too early!");
}

ShenandoahReentrantLock::~ShenandoahReentrantLock() {
  // Destroying a lock with a non-zero count means some lock()/unlock()
  // pairing was missed.
  assert(_count == 0, "Unbalance");
}

void ShenandoahReentrantLock::lock() {
  Thread* const thread = Thread::current();
  Thread* const owner = Atomic::load(&_owner);

  // If we are not already the owner, acquire the underlying mutex first,
  // then publish ourselves as owner. _owner is read/written with Atomic
  // ops because owned_by_self() inspects it without holding the mutex.
  if (owner != thread) {
    ShenandoahSimpleLock::lock();
    Atomic::store(&_owner, thread);
  }

  // Only the owner reaches here, so the increment needs no extra
  // synchronization.
  _count++;
}

void ShenandoahReentrantLock::unlock() {
  assert(owned_by_self(), "Invalid owner");
  assert(_count > 0, "Invalid count");

  _count--;

  // Last nested unlock: clear ownership before releasing the mutex so a
  // stale owner is never observed after the lock is free.
  if (_count == 0) {
    Atomic::store(&_owner, (Thread*)nullptr);
    ShenandoahSimpleLock::unlock();
  }
}

// Lock-free ownership check: compares the atomically-loaded _owner against
// the current thread. Safe to call without holding the lock.
bool ShenandoahReentrantLock::owned_by_self() const {
  Thread* const thread = Thread::current();
  Thread* const owner = Atomic::load(&_owner);
  return owner == thread;
}