src/hotspot/share/opto/locknode.cpp

    // unlocks reference only this one object.
  }
#endif
  if (unique_lock != nullptr && has_one_lock) {
    *unique_lock = lock;
  }
  return true;
}

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastLockNode::hash() const { return NO_HASH; }

uint FastLockNode::size_of() const { return sizeof(*this); }

//------------------------------cmp--------------------------------------------
bool FastLockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

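Returning NO_HASH from hash(), paired with a cmp() that matches only the node itself, keeps FastLock and FastUnlock nodes out of GVN's value-numbering table, so two locking sites are never merged even when their inputs are identical. A minimal sketch of that mechanism, using toy stand-ins (ToyNode, value_number) rather than HotSpot's Node and PhaseGVN classes:

#include <cstdint>
#include <cstdio>
#include <unordered_map>
#include <vector>

// Toy stand-in for a compiler IR node (hypothetical, not HotSpot API).
struct ToyNode {
  int opcode;
  std::vector<ToyNode*> inputs;
  bool no_hash;                       // models hash() returning NO_HASH
};

constexpr uint64_t NO_HASH = 0;

uint64_t toy_hash(const ToyNode* n) {
  if (n->no_hash) return NO_HASH;     // node opts out of value numbering
  uint64_t h = (uint64_t)n->opcode;
  for (ToyNode* in : n->inputs) h = h * 31 + (uint64_t)(uintptr_t)in;
  return h;
}

// Return a previously recorded equivalent node, or record this one.
// (Collision handling via cmp() is elided; a real table compares nodes.)
ToyNode* value_number(std::unordered_map<uint64_t, ToyNode*>& table, ToyNode* n) {
  uint64_t h = toy_hash(n);
  if (h == NO_HASH) return n;         // FastLock-like nodes are never commoned
  auto it = table.find(h);
  return it != table.end() ? it->second : (table[h] = n);
}

int main() {
  std::unordered_map<uint64_t, ToyNode*> table;
  ToyNode a{1, {}, false}, b{1, {}, false};   // two identical "Add" nodes
  ToyNode l1{2, {}, true}, l2{2, {}, true};   // two "FastLock" nodes
  std::printf("adds commoned:  %d\n", value_number(table, &a) == value_number(table, &b));
  std::printf("locks commoned: %d\n", value_number(table, &l1) == value_number(table, &l2));
}
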
const Type* FastLockNode::Value(PhaseGVN* phase) const {
  const Type* in1_t = phase->type(in(1));
  if (in1_t == Type::TOP) {
    return Type::TOP;
  }
  if (in1_t->is_inlinetypeptr()) {
    // Locking on inline types always fails
    return TypeInt::CC_GT;
  }
  return TypeInt::CC;
}
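The new Value() override gives GVN a constant answer when the locked object is statically known to be an inline type: the singleton result type CC_GT says the fast-lock test always fails, so the branch consuming it can be folded at compile time. A toy model of that idea (CC, ObjType, and fast_lock_value are illustrative names, not HotSpot's type lattice):

#include <cstdio>

// Toy result lattice: Top (dead), Unknown (models TypeInt::CC),
// ConstGT (models the constant TypeInt::CC_GT, i.e. "test failed").
enum class CC { Top, Unknown, ConstGT };

struct ObjType { bool is_top; bool is_inline_type_ptr; };

// Mirrors the shape of FastLockNode::Value() above.
CC fast_lock_value(const ObjType& t) {
  if (t.is_top) return CC::Top;                 // dead input: result is dead too
  if (t.is_inline_type_ptr) return CC::ConstGT; // locking an inline type always fails
  return CC::Unknown;                           // needs the runtime test
}

int main() {
  ObjType value_obj{false, true};
  if (fast_lock_value(value_obj) == CC::ConstGT) {
    // A downstream If on this condition is constant: the "lock succeeded"
    // arm is dead and can be removed; only the slow/trap path remains.
    std::puts("branch folds to the failure path");
  }
}
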

//=============================================================================
//-----------------------------hash--------------------------------------------
uint FastUnlockNode::hash() const { return NO_HASH; }

//------------------------------cmp--------------------------------------------
bool FastUnlockNode::cmp( const Node &n ) const {
  return (&n == this);                // Always fail except on self
}

void FastLockNode::create_rtm_lock_counter(JVMState* state) {
#if INCLUDE_RTM_OPT
  Compile* C = Compile::current();
  if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
    RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
    _rtm_counters = rlnc->counters();
    if (UseRTMForStackLocks) {
      rlnc = (RTMLockingNamedCounter*)
           OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
      _stack_rtm_counters = rlnc->counters();
    }
  }
#endif
}
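When RTM profiling is enabled, each lock site gets its own counter block, and a second, separate block when stack locks may also use RTM, so the two lock shapes are accounted independently. A sketch of that wiring under placeholder types (RTMCounters, LockSite, and make_lock_site are illustrative, not HotSpot's; HotSpot allocates the real counters through OptoRuntime, plain new just keeps the sketch short):

// Placeholder counter block standing in for the real RTM locking counters.
struct RTMCounters {
  long total   = 0;
  long aborted = 0;
};

// Stand-in for the two FastLockNode fields set above.
struct LockSite {
  RTMCounters* rtm_counters       = nullptr;  // inflated-lock RTM statistics
  RTMCounters* stack_rtm_counters = nullptr;  // stack-lock RTM statistics
};

LockSite make_lock_site(bool profile_rtm, bool use_rtm_for_stack_locks) {
  LockSite site;
  if (profile_rtm) {
    site.rtm_counters = new RTMCounters();    // one block per lock site
    if (use_rtm_for_stack_locks) {
      // Stack locks abort for different reasons, so they get their own block.
      site.stack_rtm_counters = new RTMCounters();
    }
  }
  return site;
}

int main() {
  LockSite site = make_lock_site(/*profile_rtm=*/true, /*use_rtm_for_stack_locks=*/true);
  return (site.rtm_counters != nullptr && site.stack_rtm_counters != nullptr) ? 0 : 1;
}
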

//=============================================================================
//------------------------------do_monitor_enter-------------------------------
void Parse::do_monitor_enter() {
  kill_dead_locals();

  // Null check; get casted pointer.
  Node* obj = null_check(peek());
  // Check for locking null object
  if (stopped()) return;

  {
    // Synchronizing on an inline type is not allowed
    BuildCutout unless(this, inline_type_test(obj, /* is_inline = */ false), PROB_MAX);
    uncommon_trap_exact(Deoptimization::Reason_class_check, Deoptimization::Action_none);
  }

  // the monitor object is not part of debug info expression stack
  pop();

  // Insert a FastLockNode which takes as arguments the current thread pointer,
  // the obj pointer & the address of the stack slot pair used for the lock.
  shared_lock(obj);
}
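The BuildCutout block above emits a guard ahead of the locking code: control that sees an inline type leaves compiled code through an uncommon trap instead of reaching shared_lock(), and PROB_MAX marks the guard as almost never taken. A schematic of that control-flow shape in plain C++ (Obj, uncommon_trap, and monitor_enter here are illustrative, not HotSpot API):

#include <cstdio>
#include <cstdlib>

struct Obj { bool is_inline_type; };   // toy stand-in for an oop

[[noreturn]] void uncommon_trap() {
  // Models Reason_class_check / Action_none: abandon this compiled frame and
  // let the interpreter re-execute the monitorenter and raise the real error.
  std::puts("deoptimize: monitorenter on an inline type");
  std::exit(1);
}

void monitor_enter(Obj* obj) {
  if (obj == nullptr) return;          // null_check / stopped() path, elided
  if (obj->is_inline_type) {           // the cutout guard, expected not taken
    uncommon_trap();
  }
  std::puts("fast-lock sequence");     // shared_lock(obj) would be emitted here
}

int main() {
  Obj identity{false};
  monitor_enter(&identity);            // prints "fast-lock sequence"
}
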

//------------------------------do_monitor_exit--------------------------------
void Parse::do_monitor_exit() {
  kill_dead_locals();

  pop();                        // Pop oop to unlock
  // Because monitors are guaranteed paired (else we bail out), we know
  // the matching Lock for this Unlock.  Hence we know there is no need
  // for a null check on Unlock.
  shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
}
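shared_unlock() can skip the null check because the parser tracks monitors as a stack of (box, object) pairs in the JVM state: monitorenter pushed an object that was already null-checked, and monitorexit peeks the matching top entry. A toy model of that bookkeeping (Monitor and MonitorStack are illustrative names, not the SafePointNode peek_monitor_box/peek_monitor_obj API):

#include <cassert>
#include <vector>

struct Monitor {
  int   box;   // stand-in for the BoxLock stack-slot pair
  void* obj;   // the locked object, non-null by construction
};

// Toy version of the per-parse monitor stack kept in the JVM state.
struct MonitorStack {
  std::vector<Monitor> monitors;

  void push(int box, void* obj) {
    assert(obj != nullptr && "monitorenter already null-checked the object");
    monitors.push_back({box, obj});
  }
  // peek analogue: the top entry is the matching enter for a structured
  // monitorexit, which is why no null check is needed on unlock.
  const Monitor& peek() const { return monitors.back(); }
  void pop() { monitors.pop_back(); }
};

int main() {
  MonitorStack jvms;
  int obj;
  jvms.push(/*box=*/0, &obj);   // do_monitor_enter side of the pairing
  const Monitor& m = jvms.peek();
  (void)m;                      // do_monitor_exit uses m.box and m.obj
  jvms.pop();
}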