/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _is_eliminated(false) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  _inmask.Insert(reg);
}

//-----------------------------hash--------------------------------------------
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks)
    return NO_HASH; // Each locked region has its own BoxLock node
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}

//------------------------------cmp--------------------------------------------
bool BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks)
    return (&n == this); // Always fail except on self
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}

BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxNode after RA which may spill box nodes.
  while (!box->is_BoxLock()) {
    // if (box_node->is_SpillCopy()) {
    //   Node *m = box_node->in(1);
    //   if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //     box_node = m->in(m->as_Mach()->operand_index(2));
    //     continue;
    //   }
    // }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged.
    // So it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

// Is BoxLock node used for one simple lock region (same box and obj)?
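// Returns false if this box is used by an AbstractLock whose object differs
// from obj (reporting the offending node through *bad_lock when provided).
// When unique_lock is non-NULL and exactly one LockNode locks obj through
// this box, that lock is returned via *unique_lock. Otherwise returns true.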
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node** bad_lock) {
  LockNode* lock = NULL;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != NULL) && alock->is_Lock()) {
            if (lock == NULL) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
              if (bad_lock != NULL) {
                *bad_lock = alock;
              }
            }
          }
        } else {
          if (bad_lock != NULL) {
            *bad_lock = alock;
          }
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj), "");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object. It could be a Phi node of different
    // cast nodes which point to this locked object.
    // We assume that no other objects could be referenced in monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != NULL && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

uint FastLockNode::size_of() const { return sizeof(*this); }

//------------------------------cmp--------------------------------------------
bool FastLockNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
bool FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

//
// Create a counter which counts the number of times this lock is acquired
//
void FastLockNode::create_lock_counter(JVMState* state) {
  BiasedLockingNamedCounter* blnc = (BiasedLockingNamedCounter*)
        OptoRuntime::new_named_counter(state, NamedCounter::BiasedLockingCounter);
  _counters = blnc->counters();
}

void FastLockNode::create_rtm_lock_counter(JVMState* state) {
#if INCLUDE_RTM_OPT
  Compile* C = Compile::current();
  if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
    RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
    _rtm_counters = rlnc->counters();
    if (UseRTMForStackLocks) {
      rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
      _stack_rtm_counters = rlnc->counters();
    }
  }
#endif
}

//=============================================================================
//------------------------------do_monitor_enter------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  C->push_monitor();

  // Null check; get casted pointer.
  Node* obj = null_check(peek());
  // Check for locking null object
  if (stopped()) return;

  // the monitor object is not part of debug info expression stack
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop(); // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}