1 /*
  2  * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 
 27 #include "runtime/os.hpp"
 28 
 29 #include "gc/shenandoah/shenandoahLock.hpp"
 30 #include "runtime/atomic.hpp"
 31 #include "runtime/interfaceSupport.inline.hpp"
 32 #include "runtime/javaThread.hpp"
 33 #include "runtime/os.inline.hpp"
 34 
 35 void ShenandoahLock::contended_lock(bool allow_block_for_safepoint) {
 36   Thread* thread = Thread::current();
 37   if (allow_block_for_safepoint && thread->is_Java_thread()) {
 38     contended_lock_internal<true>(JavaThread::cast(thread));
 39   } else {
 40     contended_lock_internal<false>(nullptr);
 41   }
 42 }
 43 
// Contended acquisition loop.
//
// ALLOW_BLOCK selects at compile time whether the loop may enter a
// ThreadBlockInVM region to cooperate with safepoints; when true,
// java_thread must be the current JavaThread.
template<bool ALLOW_BLOCK>
void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) {
  assert(!ALLOW_BLOCK || java_thread != nullptr, "Must have a Java thread when allowing block.");
  // Spin this much, but only on multi-processor systems.
  int ctr = os::is_MP() ? 0xFF : 0;
  // Apply TTAS to avoid more expensive CAS calls if the lock is still held by other thread.
  // The loop exits once the CAS successfully moves _state from unlocked to locked.
  while (Atomic::load(&_state) == locked ||
         Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
    if (ctr > 0 && !SafepointSynchronize::is_synchronizing()) {
      // Lightly contended, spin a little if no safepoint is pending.
      SpinPause();
      ctr--;
    } else if (ALLOW_BLOCK) {
      // Spin budget exhausted (or safepoint pending): yield the CPU, and
      // give the safepoint a chance to proceed by passing through TBIVM.
      ThreadBlockInVM block(java_thread);
      if (SafepointSynchronize::is_synchronizing()) {
        // If safepoint is pending, we want to block and allow safepoint to proceed.
        // Normally, TBIVM above would block us in its destructor.
        //
        // But that blocking only happens when TBIVM knows the thread poll is armed.
        // There is a window between announcing a safepoint and arming the thread poll
        // during which trying to continuously enter TBIVM is counter-productive.
        // Under high contention, we may end up going in circles thousands of times.
        // To avoid it, we wait here until local poll is armed and then proceed
        // to TBVIM exit for blocking. We do not SpinPause, but yield to let
        // VM thread to arm the poll sooner.
        while (SafepointSynchronize::is_synchronizing() &&
               !SafepointMechanism::local_poll_armed(java_thread)) {
          os::naked_yield();
        }
      } else {
        // No safepoint pending: plain yield to reduce contention.
        os::naked_yield();
      }
    } else {
      // Non-Java (or non-blocking) thread: cannot block for a safepoint,
      // so just yield and retry.
      os::naked_yield();
    }
  }
}
 81 
ShenandoahSimpleLock::ShenandoahSimpleLock() {
  // The underlying lock relies on OS mutex infrastructure, which must be
  // initialized before any instance is constructed.
  assert(os::mutex_init_done(), "Too early!");
}
 85 
// Acquire the lock; simply forwards to the underlying lock primitive.
void ShenandoahSimpleLock::lock() {
  _lock.lock();
}
 89 
// Release the lock; simply forwards to the underlying lock primitive.
void ShenandoahSimpleLock::unlock() {
  _lock.unlock();
}
 93 
// Starts with no owner and a zero recursion count.
ShenandoahReentrantLock::ShenandoahReentrantLock() :
  ShenandoahSimpleLock(), _owner(nullptr), _count(0) {
  // Same constraint as the base class: OS mutex infrastructure must be up.
  assert(os::mutex_init_done(), "Too early!");
}
 98 
ShenandoahReentrantLock::~ShenandoahReentrantLock() {
  // Every lock() must have been matched by an unlock() before destruction.
  assert(_count == 0, "Unbalance");
}
102 
103 void ShenandoahReentrantLock::lock() {
104   Thread* const thread = Thread::current();
105   Thread* const owner = Atomic::load(&_owner);
106 
107   if (owner != thread) {
108     ShenandoahSimpleLock::lock();
109     Atomic::store(&_owner, thread);
110   }
111 
112   _count++;
113 }
114 
115 void ShenandoahReentrantLock::unlock() {
116   assert(owned_by_self(), "Invalid owner");
117   assert(_count > 0, "Invalid count");
118 
119   _count--;
120 
121   if (_count == 0) {
122     Atomic::store(&_owner, (Thread*)nullptr);
123     ShenandoahSimpleLock::unlock();
124   }
125 }
126 
127 bool ShenandoahReentrantLock::owned_by_self() const {
128   Thread* const thread = Thread::current();
129   Thread* const owner = Atomic::load(&_owner);
130   return owner == thread;
131 }