/*
 * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OPTO_LOCKNODE_HPP
#define SHARE_OPTO_LOCKNODE_HPP

#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/subnode.hpp"

//------------------------------BoxLockNode------------------------------------
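// A BoxLockNode represents the stack slot which holds the on-stack BasicLock
// (the "box") for one locking region.  Lock/Unlock nodes and their
// FastLock/FastUnlock counterparts reference the box to locate the monitor
// state for their object; _kind records what lock elimination (EA, nested
// locking, coarsening) has determined about the region.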
class BoxLockNode : public Node {
private:
  const int _slot;   // stack slot
  RegMask   _inmask; // OptoReg corresponding to stack slot
  enum {
    Regular = 0,  // Normal locking region
    Local,        // EA found that a local, non-escaping object is used for locking
    Nested,       // This region is inside another region which uses the same object
    Coarsened,    // Some lock/unlock in region were marked as coarsened
    Unbalanced,   // This region became unbalanced after coarsened lock/unlock were eliminated,
                  // or it is a locking region from OSR when locking was done in the Interpreter
    Eliminated    // All lock/unlock in region were eliminated
  } _kind;

#ifndef PRODUCT
  const char* _kind_name[6] = {
    "Regular",
    "Local",
    "Nested",
    "Coarsened",
    "Unbalanced",
    "Eliminated"
  };
#endif

  // Allowed transitions of _kind:
  // Regular -> Local, Nested, Coarsened
  // Local -> Eliminated
  // Nested -> Eliminated
  // Coarsened -> Local, Nested, Unbalanced
  // EA and nested lock elimination can overwrite the Coarsened kind.
  // Transitions to the same kind are also allowed.

public:
  BoxLockNode( int lock );
  virtual int Opcode() const;
  virtual void emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const;
  virtual uint size(PhaseRegAlloc *ra_) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint size_of() const;
  virtual uint hash() const;
  virtual bool cmp( const Node &n ) const;
  virtual const class Type *bottom_type() const { return TypeRawPtr::BOTTOM; }
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual Node* Identity(PhaseGVN* phase);

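  // Convenience accessors for code that only has a generic Node*: the
  // register (stack slot) assigned to a box, and the node viewed as a
  // BoxLockNode.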
  static OptoReg::Name reg(Node* box_node);
  static BoxLockNode* box_node(Node* box_node);
  static bool same_slot(Node* box1, Node* box2) {
    return box1->as_BoxLock()->_slot == box2->as_BoxLock()->_slot;
  }
  int stack_slot() const { return _slot; }

  bool is_regular()    const { return _kind == Regular; }
  bool is_local()      const { return _kind == Local; }
  bool is_nested()     const { return _kind == Nested; }
  bool is_coarsened()  const { return _kind == Coarsened; }
  bool is_eliminated() const { return _kind == Eliminated; }
  bool is_unbalanced() const { return _kind == Unbalanced; }

  void set_local() {
    assert((_kind == Regular || _kind == Local || _kind == Coarsened),
           "incorrect kind for Local transition: %s", _kind_name[(int)_kind]);
    _kind = Local;
  }
  void set_nested() {
    assert((_kind == Regular || _kind == Nested || _kind == Coarsened),
           "incorrect kind for Nested transition: %s", _kind_name[(int)_kind]);
    _kind = Nested;
  }
  void set_coarsened() {
    assert((_kind == Regular || _kind == Coarsened),
           "incorrect kind for Coarsened transition: %s", _kind_name[(int)_kind]);
    _kind = Coarsened;
  }
  void set_eliminated() {
    assert((_kind == Local || _kind == Nested),
           "incorrect kind for Eliminated transition: %s", _kind_name[(int)_kind]);
    _kind = Eliminated;
  }
  void set_unbalanced() {
    assert((_kind == Coarsened || _kind == Unbalanced),
           "incorrect kind for Unbalanced transition: %s", _kind_name[(int)_kind]);
    _kind = Unbalanced;
  }

  // Is this BoxLock node used for one simple lock region?
  bool is_simple_lock_region(LockNode** unique_lock, Node* obj, Node** bad_lock);

#ifndef PRODUCT
  virtual void format( PhaseRegAlloc *, outputStream *st ) const;
  virtual void dump_spec(outputStream *st) const {
    st->print(" Lock slot: %d, Kind: %s", _slot, _kind_name[(int)_kind]);
  }
#endif
};

//------------------------------FastLockNode-----------------------------------
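// FastLockNode models the inline fast path of monitor enter for obj_node()
// using the given box.  It produces a condition-code result (see Value())
// which is tested to decide whether the slow-path runtime call of the
// matching LockNode is needed.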
class FastLockNode: public CmpNode {
public:
  FastLockNode(Node *ctrl, Node *oop, Node *box) : CmpNode(oop,box) {
    init_req(0,ctrl);
    init_class_id(Class_FastLock);
  }
  Node* obj_node() const { return in(1); }
  Node* box_node() const { return in(2); }
  void  set_box_node(Node* box) { set_req(2, box); }

  // FastLock and FastUnlock nodes do not hash; we need one for each
  // corresponding LockNode/UnlockNode to avoid creating Phis.
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint size_of() const;
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const { return TypeInt::CC; }
  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }
};


//------------------------------FastUnlockNode---------------------------------
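// FastUnlockNode models the inline fast path of monitor exit for obj_node()
// using the given box.  Like FastLockNode, it produces a condition-code
// result that is tested to decide whether the slow-path runtime call is
// needed.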
class FastUnlockNode: public CmpNode {
public:
  FastUnlockNode(Node *ctrl, Node *oop, Node *box) : CmpNode(oop,box) {
    init_req(0,ctrl);
    init_class_id(Class_FastUnlock);
  }
  Node* obj_node() const { return in(1); }
  Node* box_node() const { return in(2); }

  // FastLock and FastUnlock nodes do not hash; we need one for each
  // corresponding LockNode/UnlockNode to avoid creating Phis.
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual bool cmp( const Node &n ) const ;    // Always fail, except on self
  virtual int Opcode() const;
  virtual const Type* Value(PhaseGVN* phase) const { return TypeInt::CC; }
  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }
};

#endif // SHARE_OPTO_LOCKNODE_HPP
--- EOF ---