
src/hotspot/share/opto/locknode.cpp


old version:

171 #if INCLUDE_RTM_OPT
172   Compile* C = Compile::current();
173   if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
174     RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
175            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
176     _rtm_counters = rlnc->counters();
177     if (UseRTMForStackLocks) {
178       rlnc = (RTMLockingNamedCounter*)
179            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
180       _stack_rtm_counters = rlnc->counters();
181     }
182   }
183 #endif
184 }
185 
186 //=============================================================================
187 //------------------------------do_monitor_enter-------------------------------
188 void Parse::do_monitor_enter() {
189   kill_dead_locals();
190 


191   // Null check; get casted pointer.
192   Node* obj = null_check(peek());
193   // Check for locking null object
194   if (stopped()) return;
195 
196   // the monitor object is not part of debug info expression stack
197   pop();
198 
199   // Insert a FastLockNode which takes as arguments the current thread pointer,
200   // the obj pointer & the address of the stack slot pair used for the lock.
201   shared_lock(obj);
202 }
203 
204 //------------------------------do_monitor_exit--------------------------------
205 void Parse::do_monitor_exit() {
206   kill_dead_locals();
207 
208   pop();                        // Pop oop to unlock
209   // Because monitors are guaranteed paired (else we bail out), we know
210   // the matching Lock for this Unlock.  Hence we know there is no need


new version:

171 #if INCLUDE_RTM_OPT
172   Compile* C = Compile::current();
173   if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
174     RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
175            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
176     _rtm_counters = rlnc->counters();
177     if (UseRTMForStackLocks) {
178       rlnc = (RTMLockingNamedCounter*)
179            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
180       _stack_rtm_counters = rlnc->counters();
181     }
182   }
183 #endif
184 }
185 
186 //=============================================================================
187 //------------------------------do_monitor_enter-------------------------------
188 void Parse::do_monitor_enter() {
189   kill_dead_locals();
190 
191   C->push_monitor();
192 
193   // Null check; get casted pointer.
194   Node* obj = null_check(peek());
195   // Check for locking null object
196   if (stopped()) return;
197 
198   // the monitor object is not part of debug info expression stack
199   pop();
200 
201   // Insert a FastLockNode which takes as arguments the current thread pointer,
202   // the obj pointer & the address of the stack slot pair used for the lock.
203   shared_lock(obj);
204 }
205 
206 //------------------------------do_monitor_exit--------------------------------
207 void Parse::do_monitor_exit() {
208   kill_dead_locals();
209 
210   pop();                        // Pop oop to unlock
211   // Because monitors are guaranteed paired (else we bail out), we know
212   // the matching Lock for this Unlock.  Hence we know there is no need