// unlocks reference only this one object.
133 }
134 #endif
135 if (unique_lock != nullptr && has_one_lock) {
136 *unique_lock = lock;
137 }
138 return true;
139 }
140
141 //=============================================================================
142 //-----------------------------hash--------------------------------------------
// Opt out of GVN value-numbering: NO_HASH keeps this lock node from ever
// being hashed into the same bucket as (and commoned with) another node.
uint FastLockNode::hash() const { return NO_HASH; }
144
// Report the concrete size of this subclass so node copies include the
// FastLockNode-specific fields (e.g. the RTM counter pointers set in
// create_rtm_lock_counter), not just the Node base part.
uint FastLockNode::size_of() const { return sizeof(*this); }
146
147 //------------------------------cmp--------------------------------------------
148 bool FastLockNode::cmp( const Node &n ) const {
149 return (&n == this); // Always fail except on self
150 }
151
152 //=============================================================================
153 //-----------------------------hash--------------------------------------------
// Opt out of GVN value-numbering: NO_HASH prevents this unlock node from
// ever being commoned with another node.
uint FastUnlockNode::hash() const { return NO_HASH; }
155
156 //------------------------------cmp--------------------------------------------
157 bool FastUnlockNode::cmp( const Node &n ) const {
158 return (&n == this); // Always fail except on self
159 }
160
161 void FastLockNode::create_rtm_lock_counter(JVMState* state) {
162 #if INCLUDE_RTM_OPT
163 Compile* C = Compile::current();
164 if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
165 RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
166 OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
167 _rtm_counters = rlnc->counters();
168 if (UseRTMForStackLocks) {
169 rlnc = (RTMLockingNamedCounter*)
170 OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
171 _stack_rtm_counters = rlnc->counters();
172 }
173 }
174 #endif
175 }
176
177 //=============================================================================
178 //------------------------------do_monitor_enter-------------------------------
179 void Parse::do_monitor_enter() {
180 kill_dead_locals();
181
182 C->set_has_monitors(true);
183
184 // Null check; get casted pointer.
185 Node* obj = null_check(peek());
186 // Check for locking null object
187 if (stopped()) return;
188
189 // the monitor object is not part of debug info expression stack
190 pop();
191
192 // Insert a FastLockNode which takes as arguments the current thread pointer,
193 // the obj pointer & the address of the stack slot pair used for the lock.
194 shared_lock(obj);
195 }
196
197 //------------------------------do_monitor_exit--------------------------------
198 void Parse::do_monitor_exit() {
199 kill_dead_locals();
200
201 // need to set it for monitor exit as well.
202 // OSR compiled methods can start with lock taken
203 C->set_has_monitors(true);
204
205 pop(); // Pop oop to unlock
|
// unlocks reference only this one object.
133 }
134 #endif
135 if (unique_lock != nullptr && has_one_lock) {
136 *unique_lock = lock;
137 }
138 return true;
139 }
140
141 //=============================================================================
142 //-----------------------------hash--------------------------------------------
// Opt out of GVN value-numbering: NO_HASH keeps this lock node from ever
// being hashed into the same bucket as (and commoned with) another node.
uint FastLockNode::hash() const { return NO_HASH; }
144
// Report the concrete size of this subclass so node copies include the
// FastLockNode-specific fields (e.g. the RTM counter pointers set in
// create_rtm_lock_counter), not just the Node base part.
uint FastLockNode::size_of() const { return sizeof(*this); }
146
147 //------------------------------cmp--------------------------------------------
148 bool FastLockNode::cmp( const Node &n ) const {
149 return (&n == this); // Always fail except on self
150 }
151
152 const Type* FastLockNode::Value(PhaseGVN* phase) const {
153 const Type* in1_t = phase->type(in(1));
154 if (in1_t == Type::TOP) {
155 return Type::TOP;
156 }
157 if (in1_t->is_inlinetypeptr()) {
158 // Locking on inline types always fails
159 return TypeInt::CC_GT;
160 }
161 return TypeInt::CC;
162 }
163
164 //=============================================================================
165 //-----------------------------hash--------------------------------------------
// Opt out of GVN value-numbering: NO_HASH prevents this unlock node from
// ever being commoned with another node.
uint FastUnlockNode::hash() const { return NO_HASH; }
167
168 //------------------------------cmp--------------------------------------------
169 bool FastUnlockNode::cmp( const Node &n ) const {
170 return (&n == this); // Always fail except on self
171 }
172
173 void FastLockNode::create_rtm_lock_counter(JVMState* state) {
174 #if INCLUDE_RTM_OPT
175 Compile* C = Compile::current();
176 if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
177 RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
178 OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
179 _rtm_counters = rlnc->counters();
180 if (UseRTMForStackLocks) {
181 rlnc = (RTMLockingNamedCounter*)
182 OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
183 _stack_rtm_counters = rlnc->counters();
184 }
185 }
186 #endif
187 }
188
189 //=============================================================================
190 //------------------------------do_monitor_enter-------------------------------
191 void Parse::do_monitor_enter() {
192 kill_dead_locals();
193
194 Node* obj = peek();
195 const Type* obj_type = gvn().type(obj);
196 if (obj_type->is_inlinetypeptr()) {
197 uncommon_trap(Deoptimization::Reason_class_check,
198 Deoptimization::Action_none);
199 return;
200 }
201
202 C->set_has_monitors(true);
203
204 // Null check; get casted pointer.
205 obj = null_check(obj);
206 // Check for locking null object
207 if (stopped()) return;
208
209 // the monitor object is not part of debug info expression stack
210 pop();
211
212 // Insert a FastLockNode which takes as arguments the current thread pointer,
213 // the obj pointer & the address of the stack slot pair used for the lock.
214 shared_lock(obj);
215 }
216
217 //------------------------------do_monitor_exit--------------------------------
218 void Parse::do_monitor_exit() {
219 kill_dead_locals();
220
221 // need to set it for monitor exit as well.
222 // OSR compiled methods can start with lock taken
223 C->set_has_monitors(true);
224
225 pop(); // Pop oop to unlock
|