
src/hotspot/share/opto/locknode.cpp

Old version:

132     // unlocks reference only this one object.
133   }
134 #endif
135   if (unique_lock != NULL && has_one_lock) {
136     *unique_lock = lock;
137   }
138   return true;
139 }
140 
141 //=============================================================================
142 //-----------------------------hash--------------------------------------------
143 uint FastLockNode::hash() const { return NO_HASH; }
144 
145 uint FastLockNode::size_of() const { return sizeof(*this); }
146 
147 //------------------------------cmp--------------------------------------------
148 bool FastLockNode::cmp( const Node &n ) const {
149   return (&n == this);                // Always fail except on self
150 }
151 
152 //=============================================================================
153 //-----------------------------hash--------------------------------------------
154 uint FastUnlockNode::hash() const { return NO_HASH; }
155 
156 //------------------------------cmp--------------------------------------------
157 bool FastUnlockNode::cmp( const Node &n ) const {
158   return (&n == this);                // Always fail except on self
159 }
160 
161 void FastLockNode::create_rtm_lock_counter(JVMState* state) {
162 #if INCLUDE_RTM_OPT
163   Compile* C = Compile::current();
164   if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
165     RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
166            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
167     _rtm_counters = rlnc->counters();
168     if (UseRTMForStackLocks) {
169       rlnc = (RTMLockingNamedCounter*)
170            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
171       _stack_rtm_counters = rlnc->counters();
172     }
173   }
174 #endif
175 }
176 
177 //=============================================================================
178 //------------------------------do_monitor_enter-------------------------------
179 void Parse::do_monitor_enter() {
180   kill_dead_locals();
181 
182   // Null check; get casted pointer.
183   Node* obj = null_check(peek());
184   // Check for locking null object
185   if (stopped()) return;
186 
187   // the monitor object is not part of debug info expression stack
188   pop();
189 
190   // Insert a FastLockNode which takes as arguments the current thread pointer,
191   // the obj pointer & the address of the stack slot pair used for the lock.
192   shared_lock(obj);
193 }
194 
195 //------------------------------do_monitor_exit--------------------------------
196 void Parse::do_monitor_exit() {
197   kill_dead_locals();
198 
199   pop();                        // Pop oop to unlock
200   // Because monitors are guaranteed paired (else we bail out), we know
201   // the matching Lock for this Unlock.  Hence we know there is no need
202   // for a null check on Unlock.
203   shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());

New version:

132     // unlocks reference only this one object.
133   }
134 #endif
135   if (unique_lock != NULL && has_one_lock) {
136     *unique_lock = lock;
137   }
138   return true;
139 }
140 
141 //=============================================================================
142 //-----------------------------hash--------------------------------------------
143 uint FastLockNode::hash() const { return NO_HASH; }
144 
145 uint FastLockNode::size_of() const { return sizeof(*this); }
146 
147 //------------------------------cmp--------------------------------------------
148 bool FastLockNode::cmp( const Node &n ) const {
149   return (&n == this);                // Always fail except on self
150 }
151 
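Both lock node classes opt out of value numbering: hash() returns NO_HASH and cmp() matches only the node itself, so GVN can never common two FastLock (or FastUnlock) nodes even when their inputs are identical. Each monitorenter keeps its own node, tied to its own stack slot pair. A minimal, self-contained sketch of the pattern; the Node struct and hash_find_insert below are illustrative stand-ins, not HotSpot's actual GVN code:

    #include <cstdio>
    #include <vector>

    // Illustrative stand-in for C2's value-numbering table; names and
    // structure are hypothetical, not HotSpot's real GVN.
    struct Node {
      static constexpr unsigned NO_HASH = 0;
      unsigned opcode;
      bool     is_lock;  // FastLock/FastUnlock would set this
      unsigned hash() const { return is_lock ? NO_HASH : opcode; }
      bool     cmp(const Node& n) const {
        return is_lock ? (&n == this)          // always fail except on self
                       : (n.opcode == opcode);
      }
    };

    // Return an existing equivalent node, or record and return the new one.
    // A node hashing to NO_HASH never enters the table, so it is never
    // replaced by an "equal" node.
    Node* hash_find_insert(std::vector<Node*>& table, Node* n) {
      if (n->hash() == Node::NO_HASH) return n;
      for (Node* m : table)
        if (m->hash() == n->hash() && m->cmp(*n)) return m;
      table.push_back(n);
      return n;
    }

    int main() {
      std::vector<Node*> table;
      Node a{10, false}, b{10, false};   // ordinary nodes: commoned
      Node l1{20, true}, l2{20, true};   // lock nodes: stay distinct
      std::printf("%d %d\n",
                  hash_find_insert(table, &a) == hash_find_insert(table, &b),
                  hash_find_insert(table, &l1) == hash_find_insert(table, &l2));
      return 0;  // prints "1 0"
    }
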
152 const Type* FastLockNode::Value(PhaseGVN* phase) const {
153   if (phase->type(in(1))->is_inlinetypeptr()) {
154     // Locking on inline types always fails
155     return TypeInt::CC_GT;
156   }
157   return TypeInt::CC;
158 }
159 
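The new Value() override is what makes inline-type locking fold at the IR level: FastLock produces a condition-code value, and for an inline-type input its type is the singleton TypeInt::CC_GT, a compile-time constant meaning the fast-path lock can never succeed. Once the node is replaced by that constant, the success branch is dead and only the slow path, which fails at runtime, remains. A small model of that folding step, with hypothetical names standing in for C2's type lattice:

    #include <cassert>

    // Hypothetical model of Value()/singleton folding; the enum stands in
    // for C2's TypeInt condition-code types.
    enum class CCType { CC, CC_GT };  // CC: unknown result; CC_GT: constant "failed"
    inline bool is_singleton(CCType t) { return t == CCType::CC_GT; }

    struct FastLock {
      bool input_is_inline_type;
      // Mirrors FastLockNode::Value: locking on an inline type always fails,
      // so the result is known to be the constant CC_GT.
      CCType value() const {
        return input_is_inline_type ? CCType::CC_GT : CCType::CC;
      }
    };

    // What the optimizer does with a singleton type: the node becomes a
    // constant and the branch testing "lock succeeded" folds away.
    bool fast_path_reachable(const FastLock& n) {
      return !is_singleton(n.value());
    }

    int main() {
      assert(!fast_path_reachable(FastLock{true}));   // inline type: slow path only
      assert( fast_path_reachable(FastLock{false}));  // ordinary oop: both paths kept
      return 0;
    }
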
160 //=============================================================================
161 //-----------------------------hash--------------------------------------------
162 uint FastUnlockNode::hash() const { return NO_HASH; }
163 
164 //------------------------------cmp--------------------------------------------
165 bool FastUnlockNode::cmp( const Node &n ) const {
166   return (&n == this);                // Always fail except on self
167 }
168 
169 void FastLockNode::create_rtm_lock_counter(JVMState* state) {
170 #if INCLUDE_RTM_OPT
171   Compile* C = Compile::current();
172   if (C->profile_rtm() || (PrintPreciseRTMLockingStatistics && C->use_rtm())) {
173     RTMLockingNamedCounter* rlnc = (RTMLockingNamedCounter*)
174            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
175     _rtm_counters = rlnc->counters();
176     if (UseRTMForStackLocks) {
177       rlnc = (RTMLockingNamedCounter*)
178            OptoRuntime::new_named_counter(state, NamedCounter::RTMLockingCounter);
179       _stack_rtm_counters = rlnc->counters();
180     }
181   }
182 #endif
183 }
184 
185 //=============================================================================
186 //------------------------------do_monitor_enter-------------------------------
187 void Parse::do_monitor_enter() {
188   kill_dead_locals();
189 
190   Node* obj = peek();
191   const Type* obj_type = gvn().type(obj);
192   if (obj_type->isa_inlinetype() || obj_type->is_inlinetypeptr()) {
193     uncommon_trap(Deoptimization::Reason_class_check,
194                   Deoptimization::Action_none);
195     return;
196   }
197 
198   // Null check; get casted pointer.
199   obj = null_check(obj);
200   // Check for locking null object
201   if (stopped()) return;
202 
203   // the monitor object is not part of debug info expression stack
204   pop();
205 
206   // Insert a FastLockNode which takes as arguments the current thread pointer,
207   // the obj pointer & the address of the stack slot pair used for the lock.
208   shared_lock(obj);
209 }
210 
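Note the ordering in do_monitor_enter: the inline-type check runs first, on the statically known type alone, and replaces the whole monitorenter with an uncommon trap, so no lock IR is emitted for a known inline type; the null check and shared_lock only happen for ordinary oops. A compact model of that control flow; the enum and returned strings are invented for illustration, while the real code emits IR nodes:

    #include <cstdio>

    // Invented names modeling do_monitor_enter's control flow.
    enum class StaticType { InlineType, MaybeNullOop, NonNullOop };

    const char* monitor_enter(StaticType t) {
      // 1. Inline types can never be locked: trap before anything else,
      //    based on the static type alone. No lock IR is emitted.
      if (t == StaticType::InlineType) return "uncommon_trap(class_check)";
      // 2. Null check next; a provably-null object stops the parse of
      //    this path (the null path ends in an exception).
      // 3. Otherwise pop the oop and emit FastLock(thread, obj, box).
      return t == StaticType::MaybeNullOop ? "null_check + shared_lock"
                                           : "shared_lock";
    }

    int main() {
      std::printf("%s\n%s\n%s\n",
                  monitor_enter(StaticType::InlineType),
                  monitor_enter(StaticType::MaybeNullOop),
                  monitor_enter(StaticType::NonNullOop));
      return 0;
    }
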
211 //------------------------------do_monitor_exit--------------------------------
212 void Parse::do_monitor_exit() {
213   kill_dead_locals();
214 
215   pop();                        // Pop oop to unlock
216   // Because monitors are guaranteed paired (else we bail out), we know
217   // the matching Lock for this Unlock.  Hence we know there is no need
218   // for a null check on Unlock.
219   shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
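
do_monitor_exit can skip the null check because the parser tracks monitors as a stack in the JVMState: monitorenter pushes an (obj, box) pair whose obj was already null-checked, and since the compiler bails out on unbalanced locking, monitorexit always finds its matching pair on top (peek_monitor_box/peek_monitor_obj above). A minimal sketch of that invariant; Monitor and ParseState are illustrative, not the real JVMState types:

    #include <cassert>
    #include <vector>

    // Minimal model of the parser's per-state monitor stack.
    struct Monitor { int box_slot; const void* obj; };

    struct ParseState {
      std::vector<Monitor> monitors;
      void enter(int box, const void* obj) {
        assert(obj != nullptr);          // monitorenter already null-checked
        monitors.push_back({box, obj});
      }
      // monitorexit: structured locking guarantees the stack is non-empty
      // and the top entry is the matching lock, so no null check is needed.
      Monitor exit() {
        assert(!monitors.empty());
        Monitor m = monitors.back();
        monitors.pop_back();
        return m;
      }
    };

    int main() {
      ParseState s;
      int dummy;
      s.enter(0, &dummy);
      Monitor m = s.exit();
      assert(m.obj == &dummy && m.box_slot == 0);
      return 0;
    }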