< prev index next >

src/hotspot/share/opto/locknode.cpp

Print this page




 165 #if INCLUDE_RTM_OPT
 166   Compile* C = Compile::current();
 167   if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
 168     RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
 169            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
 170     _rtm_counters = rlnc->counters();
 171     if (UseRTMForStackLocks) {
 172       rlnc = (RTMLockingNamedCounter*)
 173            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
 174       _stack_rtm_counters = rlnc->counters();
 175     }
 176   }
 177 #endif
 178 }
 179 
 180 //=============================================================================
 181 //------------------------------do_monitor_enter-------------------------------
 182 void Parse::do_monitor_enter() {
 183   kill_dead_locals();
 184 


 185   // Null check; get casted pointer.
 186   Node* obj = null_check(peek());
 187   // Check for locking null object
 188   if (stopped()) return;
 189 
 190   // the monitor object is not part of debug info expression stack
 191   pop();
 192 
 193   // Insert a FastLockNode which takes as arguments the current thread pointer,
 194   // the obj pointer & the address of the stack slot pair used for the lock.
 195   shared_lock(obj);
 196 }
 197 
 198 //------------------------------do_monitor_exit--------------------------------
 199 void Parse::do_monitor_exit() {
 200   kill_dead_locals();
 201 
 202   pop();                        // Pop oop to unlock
 203   // Because monitors are guaranteed paired (else we bail out), we know
 204   // the matching Lock for this Unlock.  Hence we know there is no need


 165 #if INCLUDE_RTM_OPT
 166   Compile* C = Compile::current();
 167   if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
 168     RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
 169            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
 170     _rtm_counters = rlnc->counters();
 171     if (UseRTMForStackLocks) {
 172       rlnc = (RTMLockingNamedCounter*)
 173            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
 174       _stack_rtm_counters = rlnc->counters();
 175     }
 176   }
 177 #endif
 178 }
 179 
 180 //=============================================================================
 181 //------------------------------do_monitor_enter-------------------------------
 182 void Parse::do_monitor_enter() {
     // Parse a monitorenter: null-check the object on the expression stack,
     // flag the compilation as containing monitors, then emit the
     // fast-path locking code via shared_lock().
 183   kill_dead_locals();
 184 
     // Record on the Compile object that this method contains monitors.
 185   C->set_has_monitors(true);
 186 
 187   // Null check; get casted pointer.
     // NOTE: peek() (not pop()) keeps the oop on the expression stack
     // across the null check; it is popped below once known non-null.
 188   Node* obj = null_check(peek());
 189   // Check for locking null object
     // If the null check folded the path away there is nothing to lock.
 190   if (stopped()) return;
 191 
 192   // the monitor object is not part of debug info expression stack
 193   pop();
 194 
 195   // Insert a FastLockNode which takes as arguments the current thread pointer,
 196   // the obj pointer & the address of the stack slot pair used for the lock.
 197   shared_lock(obj);
 198 }
 199 
 200 //------------------------------do_monitor_exit--------------------------------
 201 void Parse::do_monitor_exit() {
 202   kill_dead_locals();
 203 
 204   pop();                        // Pop oop to unlock
 205   // Because monitors are guaranteed paired (else we bail out), we know
 206   // the matching Lock for this Unlock.  Hence we know there is no need
< prev index next >