/*
 * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "opto/locknode.hpp"
#include "opto/parse.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

//=============================================================================
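// A BoxLock is associated with a single stack slot (recorded in _inmask by
// the constructor); its result is matched as an ordinary pointer (Op_RegP).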
const RegMask &BoxLockNode::in_RegMask(uint i) const {
  return _inmask;
}

const RegMask &BoxLockNode::out_RegMask() const {
  return *Matcher::idealreg2regmask[Op_RegP];
}

uint BoxLockNode::size_of() const { return sizeof(*this); }

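// A BoxLockNode designates the stack slot ("box") that holds the on-stack
// lock record for one monitor. It is flagged rematerializable so the register
// allocator can recreate it instead of spilling it.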
BoxLockNode::BoxLockNode( int slot ) : Node( Compile::current()->root() ),
                                       _slot(slot), _is_eliminated(false) {
  init_class_id(Class_BoxLock);
  init_flags(Flag_rematerialize);
  OptoReg::Name reg = OptoReg::stack2reg(_slot);
  _inmask.Insert(reg);
}

//-----------------------------hash--------------------------------------------
uint BoxLockNode::hash() const {
  if (EliminateNestedLocks)
    return NO_HASH; // Each locked region has its own BoxLock node
  return Node::hash() + _slot + (_is_eliminated ? Compile::current()->fixed_slots() : 0);
}

//------------------------------cmp--------------------------------------------
bool BoxLockNode::cmp( const Node &n ) const {
  if (EliminateNestedLocks)
    return (&n == this); // Always fail except on self
  const BoxLockNode &bn = (const BoxLockNode &)n;
  return bn._slot == _slot && bn._is_eliminated == _is_eliminated;
}

BoxLockNode* BoxLockNode::box_node(Node* box) {
  // Chase down the BoxNode after RA which may spill box nodes.
  while (!box->is_BoxLock()) {
    //    if (box_node->is_SpillCopy()) {
    //      Node *m = box_node->in(1);
    //      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_StoreP) {
    //        box_node = m->in(m->as_Mach()->operand_index(2));
    //        continue;
    //      }
    //    }
    assert(box->is_SpillCopy() || box->is_Phi(), "Bad spill of Lock.");
    // Only BoxLock nodes with the same stack slot are merged.
    // So it is enough to trace one path to find the slot value.
    box = box->in(1);
  }
  return box->as_BoxLock();
}

OptoReg::Name BoxLockNode::reg(Node* box) {
  return box_node(box)->in_RegMask(0).find_first_elem();
}

// Is BoxLock node used for one simple lock region (same box and obj)?
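// Out-parameters: *unique_lock receives the single Lock node when exactly one
// Lock uses this box; *bad_lock receives an offending AbstractLock (one that
// locks a different object, or a second distinct Lock on the same box).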
bool BoxLockNode::is_simple_lock_region(LockNode** unique_lock, Node* obj, Node** bad_lock) {
  LockNode* lock = nullptr;
  bool has_one_lock = false;
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    assert(!n->is_Phi(), "should not merge BoxLock nodes");
    if (n->is_AbstractLock()) {
      AbstractLockNode* alock = n->as_AbstractLock();
      // Check lock's box since box could be referenced by Lock's debug info.
      if (alock->box_node() == this) {
        if (alock->obj_node()->eqv_uncast(obj)) {
          if ((unique_lock != nullptr) && alock->is_Lock()) {
            if (lock == nullptr) {
              lock = alock->as_Lock();
              has_one_lock = true;
            } else if (lock != alock->as_Lock()) {
              has_one_lock = false;
              if (bad_lock != nullptr) {
                *bad_lock = alock;
              }
            }
          }
        } else {
          if (bad_lock != nullptr) {
            *bad_lock = alock;
          }
          return false; // Different objects
        }
      }
    }
  }
#ifdef ASSERT
  // Verify that FastLock and Safepoint reference only this lock region.
  for (uint i = 0; i < this->outcnt(); i++) {
    Node* n = this->raw_out(i);
    if (n->is_FastLock()) {
      FastLockNode* flock = n->as_FastLock();
      assert((flock->box_node() == this) && flock->obj_node()->eqv_uncast(obj),"");
    }
    // Don't check monitor info in safepoints since the referenced object could
    // be different from the locked object. It could be a Phi node of different
    // cast nodes which point to this locked object.
    // We assume that no other objects could be referenced in monitor info
    // associated with this BoxLock node because all associated locks and
    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != nullptr && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

uint FastLockNode::size_of() const { return sizeof(*this); }

//------------------------------cmp--------------------------------------------
bool FastLockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
bool FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

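// Attach RTM profiling counters to this FastLock when RTM profiling or precise
// RTM locking statistics are requested; a separate counter set is allocated
// for stack locks when UseRTMForStackLocks is enabled.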
void FastLockNode::create_rtm_lock_counter(JVMState* state) {
#if INCLUDE_RTM_OPT
  Compile* C = Compile::current();
  if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
    RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
    _rtm_counters = rlnc->counters();
    if (UseRTMForStackLocks) {
      rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
      _stack_rtm_counters = rlnc->counters();
    }
  }
#endif
}

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get the casted pointer.
  Node* obj = null_check(peek());
  // Check for locking a null object.
  if (stopped()) return;

  // The monitor object is not part of the debug info expression stack.
  pop();

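  // Under partial escape analysis, also record the monitor on the object's
  // virtual state (if it is still virtual) by bumping its lock count.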
  if (DoPartialEscapeAnalysis) {
    PEAState& state = jvms()->alloc_state();
    VirtualState* vs = state.as_virtual(PEA(), obj);

    if (vs != nullptr) {
      vs->lock_inc();
    }
  }

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  // Need to set has_monitors for monitor exit as well:
  // OSR-compiled methods can start with the lock already taken.
  C->set_has_monitors(true);
  Node* obj = map()->peek_monitor_obj();

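  // Under partial escape analysis, the object being unlocked may still be
  // virtual (only its lock count is decremented) or may already have been
  // materialized, in which case the materialized value, possibly a Phi merging
  // the original and materialized objects, must be unlocked instead.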
  if (DoPartialEscapeAnalysis) {
    PEAState& state = jvms()->alloc_state();
    ObjID id = PEA()->is_alias(obj);
    if (id != nullptr && state.contains(id)) {
      ObjectState* os = state.get_object_state(id);
      if (os->is_virtual()) {
        static_cast<VirtualState*>(os)->lock_dec();
      } else {
        auto materialized = state.get_materialized_value(id);
        if (materialized != nullptr) {
          obj = materialized;

          if (obj->is_Phi()) {
            pop();

            // We need to split Phi + Unlock like this:
            // +------+   +-----+ +------+
            // |Region|   | obj | | obj' | (PEA materialized object)
            // +------+   +-----+ +------+
            //        \     |    /
            //        +------------+
            //        |phi|        |
            //        +------------+
            //               |P0
            //           +--------------+
            //           | UnlockNode   |
            //           +--------------+
            //
            // Split this Phi because it helps EA and ME eliminate Unlock nodes.
            // The bytecode monitor_exit post-dominates the object, so the PhiNode has already been finalized.
            // For GraphKit::shared_unlock(), the only side effects are ctrl + abio + memory.
            //
            //
            // +------+          +-----+                      +------+
            // |Region|          | obj |                      | obj' |
            // +------+          +-----+                      +------+
            //                      |                         /
            //                      |P0                      |P0
            //                  +--------------+         +--------------+
            //                  | UnlockNode   |         | UnlockNode   |
            //                  +--------------+         +--------------+
            //                    |   |abio |memory        |  |abio |memory
            //                    |                 /-----/
            //                    | ctrl           / ctrl
            //                 +--------------------+
            //                 | new_rgn            | (new_rgn merges abio and memory as well)
            //                 +--------------------+
            //                    |ctrl |abio |memory
            //                   +--------------------+
            //                   | sfpt (map)         |
            //                   +--------------------+
            //
            RegionNode* region = obj->in(0)->as_Region();
            BoxLockNode* box = map()->peek_monitor_box()->as_BoxLock();

            Node* new_rgn = new RegionNode(region->req());
            gvn().set_type(new_rgn, Type::CONTROL);

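            // For every predecessor of the materialized Phi: clone the saved
            // JVM state, rewind its Phi inputs to that predecessor, emit a
            // separate unlock on that path, and merge ctrl, abio and memory
            // back on new_rgn.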
            bool merged = false;
            GraphKit saved_ctx = {clone_map()->jvms()};

            // Iterate i in reverse to simulate merging the normal paths.
            // merge_memory_edges() will do GVN when i == 1.
            for (uint i = region->req()-1; i > 0; --i) {
              Node* ctrl = region->in(i);
              Node* abio = nullptr;
              MergeMemNode* mem = nullptr;

              if (ctrl != nullptr && ctrl != C->top()) {
                SafePointNode* curr = saved_ctx.clone_map();
                GraphKit kit = { curr->jvms() };

                kit.set_control(ctrl);
                // Rewind the SafePointNode to the state it had on the
                // predecessor path controlled by region->in(i).
                for (uint j = 1; j < curr->req(); ++j) {
                  Node* m = curr->in(j);

                  if (j == TypeFunc::Memory) {
                    if (m->is_Phi() && m->in(0) == region) {
                      m = m->in(i);
                    } else if (m->is_MergeMem()) {
                      // Start from a blank memory and copy over this path's slices.
                      MergeMemNode* new_all_mem = MergeMemNode::make(MergeMemNode::make_empty_memory());
                      new_all_mem->grow_to_match(m->as_MergeMem());
                      for (MergeMemStream mms(m->as_MergeMem()); mms.next_non_empty(); ) {
                        Node* p = mms.memory();
                        if (p->is_Phi() && p->in(0) == region) {
                          new_all_mem->set_req(mms.alias_idx(), p->in(i));
                        } else {
                          new_all_mem->set_req(mms.alias_idx(), p);
                        }
                      }
                      m = new_all_mem;
                    }
                    curr->set_memory(m);
                  } else if (m != nullptr && m->is_Phi() && m->in(0) == region) {
                    curr->set_req(j, m->in(i));
                  }
                }

                kit.shared_unlock(box, obj->in(i), true);

                ctrl = kit.control();
                mem = kit.merged_memory();
                abio = kit.i_o();
              } else {
                assert(false, "impossible! monitorExit must post-dominate the PhiNode.");
              }

              new_rgn->init_req(i, ctrl);

              if (!merged) {
                merged = true;
                set_control(new_rgn); // merge_memory_edges() requires that ctrl() is a RegionNode.
                set_all_memory(mem);
                set_i_o(abio);
              } else {
                merge_memory_edges(mem, i, false);
                Node* phi = i_o();
                // Create an i/o Phi on new_rgn unless we already have one.
                if (!phi->is_Phi() || phi->in(0) != new_rgn) {
                  phi = PhiNode::make(new_rgn, phi);
                  gvn().set_type(phi, Type::ABIO);
                  record_for_igvn(phi);
                }
                phi->set_req(i, abio);
                set_i_o(phi);
              }
            }

            new_rgn = _gvn.transform(new_rgn);
            set_control(new_rgn);
            record_for_igvn(new_rgn);

            map()->pop_monitor();
            return;
          }
        }
      }
    }
  }

  pop();                        // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), obj);
}