/*
 * Copyright (c) 1999, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
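// A BoxLock is pinned to the stack slot chosen at construction: the input mask
// holds only that slot (see the constructor below), and BoxLockNode::reg()
// reads the slot back out of this mask.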
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

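// The box value itself is an ordinary machine pointer, so the output uses the
// standard pointer-register mask.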
const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

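// Report the concrete object size so Node cloning copies the BoxLock-specific
// fields (_slot, _inmask, _kind) as well.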
uint BoxLockNode::size_of() const { return sizeof(*this); }

BoxLockNode::BoxLockNode(int slot)
    : Node(Compile::current()->root()),
      _slot(slot),
      // In debug mode, signal that the register mask is constant.
      _inmask(OptoReg::stack2reg(_slot),
              Compile::current()->comp_arena()
              DEBUG_ONLY(COMMA /*read_only*/ true)),
      _kind(BoxLockNode::Regular) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  if (_slot > BoxLockNode_SLOT_LIMIT) {
    Compile::current()->record_method_not_compilable(
        "reached BoxLockNode slot limit");
    return;
  }
}

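// With EliminateNestedLocks every locked region keeps its own BoxLock node, so
// value numbering is disabled via NO_HASH. Otherwise boxes hash by stack slot
// and elimination status so that equivalent boxes can be commoned in Identity().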
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks) {
    return NO_HASH; // Each locked region has its own BoxLock node
  }
  return Node::hash() + _slot + (is_eliminated() ? Compile::current()->fixed_slots() : 0);
}

bool BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks) {
    return (&n == this); // Always fail except on self
  }
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return (bn._slot == _slot) && (bn.is_eliminated() == is_eliminated());
}

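// When boxes may be commoned (EliminateNestedLocks is off and this box is not
// eliminated), fold this node into an equivalent BoxLock found in the GVN hash
// table and merge the lock-region status (_kind) conservatively.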
Node* BoxLockNode::Identity(PhaseGVN* phase) {
  if (!EliminateNestedLocks && !this->is_eliminated()) {
    Node* n = phase->hash_find(this);
    if (n == nullptr || n == this) {
      return this;
    }
    BoxLockNode* old_box = n->as_BoxLock();
    // Set corresponding status (_kind) when commoning BoxLock nodes.
    if (this->_kind != old_box->_kind) {
      if (this->is_unbalanced()) {
        old_box->set_unbalanced();
      }
      if (!old_box->is_unbalanced()) {
        // Only Regular or Coarsened status should be here:
        // Nested and Local are set only when EliminateNestedLocks is on.
        if (old_box->is_regular()) {
          assert(this->is_coarsened(),"unexpected kind: %s", _kind_name[(int)this->_kind]);
          old_box->set_coarsened();
        } else {
          assert(this->is_regular(),"unexpected kind: %s", _kind_name[(int)this->_kind]);
          assert(old_box->is_coarsened(),"unexpected kind: %s", _kind_name[(int)old_box->_kind]);
        }
      }
    }
    return old_box;
  }
  return this;
}

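// Walk back through SpillCopy and Phi nodes inserted by register allocation to
// find the underlying BoxLock node.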
BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxLock node after RA, which may spill box nodes.
  while (!box->is_BoxLock()) {
    //    if (box_node->is_SpillCopy()) {
    //      Node *m = box_node->in(1);
    //      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //        box_node = m->in(m->as_Mach()->operand_index(2));
    //        continue;
    //      }
    //    }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged.
    // So it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

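// The register (monitor stack slot) assigned to a box, read from its input mask.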
OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

// Is BoxLock node used for one simple lock region (same box and obj)?
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node** bad_lock) {
  if (is_unbalanced()) {
    return false;
  }
  LockNode* lock = nullptr;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != nullptr) && alock->is_Lock()) {
            if (lock == nullptr) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
              if (bad_lock != nullptr) {
                *bad_lock = alock;
              }
            }
          }
        } else {
          if (bad_lock != nullptr) {
            *bad_lock = alock;
          }
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj),"");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object. It could be a Phi of different
    // cast nodes which all point to this locked object.
    // We assume that no other objects are referenced in monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != nullptr && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

uint FastLockNode::size_of() const { return sizeof(*this); }

//------------------------------cmp--------------------------------------------
bool FastLockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

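// Constant-fold the lock test when the input type already decides it: TOP
// propagates, and locking an inline type can never succeed.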
const Type* FastLockNode::Value(PhaseGVN* phase) const {
  const Type* in1_t = phase->type(in(1));
  if (in1_t == Type::TOP) {
    return Type::TOP;
  }
  if (in1_t->is_inlinetypeptr()) {
    // Locking on inline types always fails
    return TypeInt::CC_GT;
  }
  return TypeInt::CC;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
bool FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get the cast pointer.
  Node* obj = null_check(peek());
  // Check for locking a null object.
  if (stopped()) return;

  {
    // Synchronizing on an inline type is not allowed
    BuildCutout unless(this, inline_type_test(obj, /* is_inline = */ false), PROB_MAX);
    uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_none);
  }

  // The monitor object is not part of the debug info expression stack.
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop();                        // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}