/*
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
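// A BoxLockNode is bound to a single stack slot: in_RegMask() returns a mask
// holding only the monitor's stack slot (populated by the constructor below),
// while out_RegMask() allows any pointer register.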
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

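// The constructor records the monitor's stack slot in the input mask and marks
// the node rematerializable, so the register allocator can recreate it instead
// of spilling it. If the slot cannot be represented in a register mask, the
// method is recorded as not compilable.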
BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _is_eliminated(false) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  if (!RegMask::can_represent(reg, Compile::current()->sync_stack_slots())) {
    Compile::current()->record_method_not_compilable("must be able to represent all monitor slots in reg mask");
    return;
  }
  _inmask.Insert(reg);
}

//-----------------------------hash--------------------------------------------
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks)
    return NO_HASH; // Each locked region has its own BoxLock node
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}

//------------------------------cmp--------------------------------------------
bool BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks)
    return (&n == this); // Always fail except on self
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}

BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxLock node after RA, which may spill box nodes.
  while (!box->is_BoxLock()) {
    //    if (box_node->is_SpillCopy()) {
    //      Node *m = box_node->in(1);
    //      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //        box_node = m->in(m->as_Mach()->operand_index(2));
    //        continue;
    //      }
    //    }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged,
    // so it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

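// The register (stack slot) assigned to a box is the single element of its
// input mask.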
OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

// Is this BoxLock node used for one simple lock region (same box and obj)?
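// Returns false, reporting the offending lock through *bad_lock, when a lock
// on this box uses a different object. On success, *unique_lock (if requested)
// is set when exactly one Lock node locks through this box; *bad_lock is also
// set if several distinct Lock nodes are found.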
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node** bad_lock) {
  LockNode* lock = nullptr;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check the lock's box since the box could be referenced by the Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != nullptr) && alock->is_Lock()) {
            if (lock == nullptr) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
              if (bad_lock != nullptr) {
                *bad_lock = alock;
              }
            }
          }
        } else {
          if (bad_lock != nullptr) {
            *bad_lock = alock;
          }
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj),"");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object. It could be a Phi node of different
    // cast nodes which point to this locked object.
    // We assume that no other objects could be referenced in monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != nullptr && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

uint FastLockNode::size_of() const { return sizeof(*this); }

//------------------------------cmp--------------------------------------------
bool FastLockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

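// Constant-fold the lock's condition when the input type is known: locking on
// an inline type always fails, so the result folds to the constant CC_GT.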
const Type* FastLockNode::Value(PhaseGVN* phase) const {
  const Type* in1_t = phase->type(in(1));
  if (in1_t == Type::TOP) {
    return Type::TOP;
  }
  if (in1_t->is_inlinetypeptr()) {
    // Locking on inline types always fails
    return TypeInt::CC_GT;
  }
  return TypeInt::CC;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
bool FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

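// Attach RTM profiling counters to this lock site when RTM profiling or
// precise RTM locking statistics are enabled. A separate set of counters is
// allocated for stack locks when UseRTMForStackLocks is set.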
void FastLockNode::create_rtm_lock_counter(JVMState* state) {
#if INCLUDE_RTM_OPT
  Compile* C = Compile::current();
  if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
    RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
    _rtm_counters = rlnc->counters();
    if (UseRTMForStackLocks) {
      rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
      _stack_rtm_counters = rlnc->counters();
    }
  }
#endif
}

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
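// Parse a monitorenter bytecode: null-check the object, trap if it is an
// inline type, then emit the locking code via shared_lock().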
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get the casted pointer.
  Node* obj = null_check(peek());
  // Check for locking a null object
  if (stopped()) return;

  {
    // Synchronizing on an inline type is not allowed
    BuildCutout unless(this, inline_type_test(obj, /* is_inline = */ false), PROB_MAX);
    uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_none);
  }

  // The monitor object is not part of the debug info expression stack.
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer and the address of the stack slot pair used for the lock.
  shared_lock(obj);
}

//------------------------------do_monitor_exit--------------------------------
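// Parse a monitorexit bytecode: pop the object and release the matching
// monitor from the current map via shared_unlock().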
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop();                        // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}