/*
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

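// This file implements the C2 IR nodes used for Java synchronization:
// BoxLockNode models the on-stack "box" (BasicLock slot) of a monitor,
// and FastLockNode/FastUnlockNode model the fast-path lock and unlock
// operations. Parse::do_monitor_enter/do_monitor_exit translate the
// monitorenter/monitorexit bytecodes into this IR.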
//=============================================================================
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

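// Construct a box for the monitor whose BasicLock lives at stack slot 'slot'.
// The node is marked rematerializable, so it can be recreated where needed
// rather than spilled. If the slot cannot be represented in a register mask,
// the method is recorded as not compilable and compilation bails out.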
BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _kind(BoxLockNode::Regular) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  if (!RegMask::can_represent(reg, Compile::current()->sync_stack_slots())) {
    Compile::current()->record_method_not_compilable("must be able to represent all monitor slots in reg mask");
    return;
  }
  _inmask.Insert(reg);
}

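// Hashing and comparison control whether GVN may common BoxLock nodes.
// With EliminateNestedLocks each locked region must keep its own box,
// so hashing is disabled and cmp() matches a node only with itself.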
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks) {
    return NO_HASH; // Each locked region has its own BoxLock node
  }
  return Node::hash() + _slot + (is_eliminated() ? Compile::current()->fixed_slots() : 0);
}

bool BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks) {
    return (&n == this); // Always fail except on self
  }
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return (bn._slot == _slot) && (bn.is_eliminated() == is_eliminated());
}

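// When boxes may be commoned (EliminateNestedLocks is off and this box is
// not eliminated), merge this node into an equivalent BoxLock already in
// the GVN hash table, transferring the Unbalanced/Coarsened status to the
// surviving node.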
Node* BoxLockNode::Identity(PhaseGVN* phase) {
  if (!EliminateNestedLocks && !this->is_eliminated()) {
    Node* n = phase->hash_find(this);
    if (n == nullptr || n == this) {
      return this;
    }
    BoxLockNode* old_box = n->as_BoxLock();
    // Set the corresponding status (_kind) when commoning BoxLock nodes.
    if (this->_kind != old_box->_kind) {
      if (this->is_unbalanced()) {
        old_box->set_unbalanced();
      }
      if (!old_box->is_unbalanced()) {
        // Only Regular or Coarsened status should be here:
        // Nested and Local are set only when EliminateNestedLocks is on.
        if (old_box->is_regular()) {
          assert(this->is_coarsened(), "unexpected kind: %s", _kind_name[(int)this->_kind]);
          old_box->set_coarsened();
        } else {
          assert(this->is_regular(), "unexpected kind: %s", _kind_name[(int)this->_kind]);
          assert(old_box->is_coarsened(), "unexpected kind: %s", _kind_name[(int)old_box->_kind]);
        }
      }
    }
    return old_box;
  }
  return this;
}

BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxLockNode after register allocation, which may have
  // spilled the box through SpillCopy or Phi nodes.
  while (!box->is_BoxLock()) {
    //    if (box_node->is_SpillCopy()) {
    //      Node *m = box_node->in(1);
    //      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //        box_node = m->in(m->as_Mach()->operand_index(2));
    //        continue;
    //      }
    //    }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged,
    // so it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

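// Return the stack register assigned to the box: the single element of
// its input register mask, inserted by the constructor.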
OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

// Is this BoxLock node used for one simple lock region (same box and obj)?
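// Returns false if the box is unbalanced or if any associated lock/unlock
// operates on a different object. If 'unique_lock' is non-null and the
// region contains exactly one LockNode, it receives that lock. On a
// mismatch, 'bad_lock' (if non-null) receives the offending lock node.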
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node** bad_lock) {
  if (is_unbalanced()) {
    return false;
  }
  LockNode* lock = nullptr;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check the lock's box, since the box could also be referenced by the
      // Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != nullptr) && alock->is_Lock()) {
            if (lock == nullptr) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
              if (bad_lock != nullptr) {
                *bad_lock = alock;
              }
            }
          }
        } else {
          if (bad_lock != nullptr) {
            *bad_lock = alock;
          }
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint nodes reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj), "FastLock references a different lock region");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object. It could be a Phi node merging
    // different cast nodes which point to this locked object.
    // We assume that no other objects could be referenced in monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != nullptr && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

uint FastLockNode::size_of() const { return sizeof(*this); }

//------------------------------cmp--------------------------------------------
bool FastLockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

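// Compute the type of the FastLock condition. If the object is statically
// known to be an inline type pointer, locking always fails, so the node's
// value is the constant CC_GT rather than the full condition-code range CC.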
const Type* FastLockNode::Value(PhaseGVN* phase) const {
  const Type* in1_t = phase->type(in(1));
  if (in1_t == Type::TOP) {
    return Type::TOP;
  }
  if (in1_t->is_inlinetypeptr()) {
    // Locking on inline types always fails
    return TypeInt::CC_GT;
  }
  return TypeInt::CC;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
bool FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

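// The parsing methods below translate the monitorenter and monitorexit
// bytecodes. Enter null-checks the object, rejects inline types, and emits
// the fast-lock IR via shared_lock(); exit needs no null check because
// monitors are guaranteed to be properly paired.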
//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get the cast pointer.
  Node* obj = null_check(peek());
  // Check for locking a null object.
  if (stopped()) return;

  {
    // Synchronizing on an inline type is not allowed.
    BuildCutout unless(this, inline_type_test(obj, /* is_inline = */ false), PROB_MAX);
    uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_none);
  }

  // The monitor object is not part of the debug info expression stack.
  pop();

  // Insert a FastLockNode, which takes as arguments the current thread
  // pointer, the obj pointer, and the address of the stack slot pair used
  // for the lock.
  shared_lock(obj);
}

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop();                        // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}