132 static OopStorage* _oop_storage;
133
134 // The sync code expects the header field to be at offset zero (0).
135 // Enforced by the assert() in header_addr().
136 volatile markWord _header; // displaced object header word - mark
137 WeakHandle _object; // backward object pointer
138 // Separate _header and _owner on different cache lines since both can
139 // have busy multi-threaded access. _header and _object are set at initial
140 // inflation. The _object does not change, so it is a good choice to share
141 // its cache line with _header.
142 DEFINE_PAD_MINUS_SIZE(0, OM_CACHE_LINE_SIZE, sizeof(volatile markWord) +
143 sizeof(WeakHandle));
144 // Used by async deflation as a marker in the _owner field.
145 // Note that the choice of the two markers is peculiar:
146 // - They need to represent values that cannot be pointers. In particular,
147 // we achieve this by using the lowest two bits.
148 // - ANONYMOUS_OWNER should be a small value, it is used in generated code
149 // and small values encode much better.
150 // - We test for anonymous owner by testing for the lowest bit, therefore
151 // DEFLATER_MARKER must *not* have that bit set.
152 #define DEFLATER_MARKER reinterpret_cast<void*>(2)
153 public:
154 // NOTE: Typed as uintptr_t so that we can pick it up in SA, via vmStructs.
155 static const uintptr_t ANONYMOUS_OWNER = 1;
156
157 private:
158 static void* anon_owner_ptr() { return reinterpret_cast<void*>(ANONYMOUS_OWNER); }
159
160 void* volatile _owner; // pointer to owning thread OR BasicLock
161 volatile uint64_t _previous_owner_tid; // thread id of the previous owner of the monitor
162 // Separate _owner and _next_om on different cache lines since
163 // both can have busy multi-threaded access. _previous_owner_tid is only
164 // changed by ObjectMonitor::exit() so it is a good choice to share the
165 // cache line with _owner.
166 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(void* volatile) +
167 sizeof(volatile uint64_t));
168 ObjectMonitor* _next_om; // Next ObjectMonitor* linkage
169 volatile intx _recursions; // recursion count, 0 for first entry
170 ObjectWaiter* volatile _EntryList; // Threads blocked on entry or reentry.
171 // The list is actually composed of WaitNodes,
172 // acting as proxies for Threads.
173
174 ObjectWaiter* volatile _cxq; // LL of recently-arrived threads blocked on entry.
175 JavaThread* volatile _succ; // Heir presumptive thread - used for futile wakeup throttling
196 // objects which can happen at normal VM shutdown.
197 //
198 #define OM_PERFDATA_OP(f, op_str) \
199 do { \
200 if (ObjectMonitor::_sync_ ## f != nullptr && \
201 PerfDataManager::has_PerfData()) { \
202 ObjectMonitor::_sync_ ## f->op_str; \
203 } \
204 } while (0)
205
206 static PerfCounter * _sync_ContendedLockAttempts;
207 static PerfCounter * _sync_FutileWakeups;
208 static PerfCounter * _sync_Parks;
209 static PerfCounter * _sync_Notifications;
210 static PerfCounter * _sync_Inflations;
211 static PerfCounter * _sync_Deflations;
212 static PerfLongVariable * _sync_MonExtant;
213
214 static int Knob_SpinLimit;
215
// Byte offsets of fields addressed directly by code that manipulates
// ObjectMonitor references by offset; see also the tag-stripping
// OM_OFFSET_NO_MONITOR_VALUE_TAG() macro below.
static ByteSize owner_offset() { return byte_offset_of(ObjectMonitor, _owner); }
static ByteSize recursions_offset() { return byte_offset_of(ObjectMonitor, _recursions); }
static ByteSize cxq_offset() { return byte_offset_of(ObjectMonitor, _cxq); }
static ByteSize succ_offset() { return byte_offset_of(ObjectMonitor, _succ); }
static ByteSize EntryList_offset() { return byte_offset_of(ObjectMonitor, _EntryList); }
221
222 // ObjectMonitor references can be ORed with markWord::monitor_value
223 // as part of the ObjectMonitor tagging mechanism. When we combine an
224 // ObjectMonitor reference with an offset, we need to remove the tag
225 // value in order to generate the proper address.
226 //
227 // We can either adjust the ObjectMonitor reference and then add the
228 // offset or we can adjust the offset that is added to the ObjectMonitor
229 // reference. The latter avoids an AGI (Address Generation Interlock)
230 // stall so the helper macro adjusts the offset value that is returned
231 // to the ObjectMonitor reference manipulation code:
232 //
233 #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
234 ((in_bytes(ObjectMonitor::f ## _offset())) - checked_cast<int>(markWord::monitor_value))
235
236 markWord header() const;
237 volatile markWord* header_addr();
238 void set_header(markWord hdr);
239
// Returns true if any thread appears to be interacting with this monitor:
// waiters, queued entrants (_cxq/_EntryList), a positive contention count,
// or a real owner. This is a lock-free snapshot of independently updated
// fields, so the result is advisory only.
bool is_busy() const {
  // TODO-FIXME: assert _owner == null implies _recursions = 0
  intptr_t ret_code = intptr_t(_waiters) | intptr_t(_cxq) | intptr_t(_EntryList);
  int cnts = contentions(); // read once
  if (cnts > 0) {
    // Only a positive contention count marks the monitor busy; a
    // non-positive value is ignored here.
    ret_code |= intptr_t(cnts);
  }
  if (!owner_is_DEFLATER_MARKER()) {
    // DEFLATER_MARKER in _owner belongs to async deflation, not to a
    // JavaThread, so it does not count as "busy".
    ret_code |= intptr_t(owner_raw());
  }
  return ret_code != 0;
}
252 const char* is_busy_to_string(stringStream* ss);
253
254 bool is_entered(JavaThread* current) const;
255
256 // Returns true if this OM has an owner, false otherwise.
257 bool has_owner() const;
258 void* owner() const; // Returns null if DEFLATER_MARKER is observed.
259 void* owner_raw() const;
260 // Returns true if owner field == DEFLATER_MARKER and false otherwise.
261 bool owner_is_DEFLATER_MARKER() const;
262 // Returns true if 'this' is being async deflated and false otherwise.
263 bool is_being_async_deflated();
264 // Clear _owner field; current value must match old_value.
265 void release_clear_owner(void* old_value);
266 // Simply set _owner field to new_value; current value must match old_value.
267 void set_owner_from(void* old_value, void* new_value);
268 // Simply set _owner field to current; current value must match basic_lock_p.
269 void set_owner_from_BasicLock(void* basic_lock_p, JavaThread* current);
270 // Try to set _owner field to new_value if the current value matches
271 // old_value, using Atomic::cmpxchg(). Otherwise, does not change the
289 // Simply set _next_om field to new_value.
290 void set_next_om(ObjectMonitor* new_value);
291
// Number of waiting threads.
// NOTE(review): presumably backed by the _waiters field read in
// is_busy() - confirm against the out-of-line definition.
int waiters() const;

// Contention count; consulted by is_busy(), where only a positive value
// counts. add_to_contentions() adjusts it by a signed delta.
int contentions() const;
void add_to_contentions(int value);

// Recursion depth of the current owner; 0 for the first (outermost) entry.
intx recursions() const { return _recursions; }
void set_recursions(size_t recursions);
298
// JVM/TI GetObjectMonitorUsage() needs this:
// accessors for iterating the _WaitSet list of ObjectWaiter nodes and
// mapping a node back to its JavaThread.
ObjectWaiter* first_waiter() { return _WaitSet; }
ObjectWaiter* next_waiter(ObjectWaiter* o) { return o->_next; }
JavaThread* thread_of_waiter(ObjectWaiter* o) { return o->_thread; }
303
304 ObjectMonitor(oop object);
305 ~ObjectMonitor();
306
307 oop object() const;
308 oop object_peek() const;
309
310 // Returns true if the specified thread owns the ObjectMonitor. Otherwise
311 // returns false and throws IllegalMonitorStateException (IMSE).
312 bool check_owner(TRAPS);
313
314 private:
// Function object invoked with the current JavaThread; tracks one monitor
// and records in _om_exited whether it was exited.
// NOTE(review): operator() is defined out-of-line; per the name it
// appears to exit _om when the thread is suspended - confirm in the .cpp.
class ExitOnSuspend {
 protected:
  ObjectMonitor* _om;   // the monitor this helper operates on
  bool _om_exited;      // initialized false; reported by exited()
 public:
  ExitOnSuspend(ObjectMonitor* om) : _om(om), _om_exited(false) {}
  void operator()(JavaThread* current);
  bool exited() { return _om_exited; }
};
// Function object invoked with the current JavaThread; holds only the
// monitor it applies to.
// NOTE(review): operator() is defined out-of-line; per the name it
// appears to clear _succ when the thread is suspended - confirm in the .cpp.
class ClearSuccOnSuspend {
 protected:
  ObjectMonitor* _om;   // the monitor whose state the callback adjusts
 public:
  ClearSuccOnSuspend(ObjectMonitor* om) : _om(om) {}
  void operator()(JavaThread* current);
};
331 public:
332 bool enter_for(JavaThread* locking_thread);
333 bool enter(JavaThread* current);
334 void exit(JavaThread* current, bool not_suspended = true);
335 void wait(jlong millis, bool interruptible, TRAPS);
336 void notify(TRAPS);
337 void notifyAll(TRAPS);
338
339 void print() const;
340 #ifdef ASSERT
341 void print_debug_style_on(outputStream* st) const;
342 #endif
343 void print_on(outputStream* st) const;
344
345 // Use the following at your own risk
346 intx complete_exit(JavaThread* current);
347
348 private:
349 void AddWaiter(ObjectWaiter* waiter);
350 void INotify(JavaThread* current);
351 ObjectWaiter* DequeueWaiter();
352 void DequeueSpecificWaiter(ObjectWaiter* waiter);
353 void EnterI(JavaThread* current);
354 void ReenterI(JavaThread* current, ObjectWaiter* current_node);
355 void UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* current_node);
356 int TryLock(JavaThread* current);
357 int TrySpin(JavaThread* current);
358 void ExitEpilog(JavaThread* current, ObjectWaiter* Wakee);
359
360 // Deflation support
361 bool deflate_monitor();
362 void install_displaced_markword_in_object(const oop obj);
363 };
364
365 #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP
|
132 static OopStorage* _oop_storage;
133
134 // The sync code expects the header field to be at offset zero (0).
135 // Enforced by the assert() in header_addr().
136 volatile markWord _header; // displaced object header word - mark
137 WeakHandle _object; // backward object pointer
138 // Separate _header and _owner on different cache lines since both can
139 // have busy multi-threaded access. _header and _object are set at initial
140 // inflation. The _object does not change, so it is a good choice to share
141 // its cache line with _header.
142 DEFINE_PAD_MINUS_SIZE(0, OM_CACHE_LINE_SIZE, sizeof(volatile markWord) +
143 sizeof(WeakHandle));
144 // Used by async deflation as a marker in the _owner field.
145 // Note that the choice of the two markers is peculiar:
146 // - They need to represent values that cannot be pointers. In particular,
147 // we achieve this by using the lowest two bits.
148 // - ANONYMOUS_OWNER should be a small value, it is used in generated code
149 // and small values encode much better.
150 // - We test for anonymous owner by testing for the lowest bit, therefore
151 // DEFLATER_MARKER must *not* have that bit set.
152 static const uintptr_t DEFLATER_MARKER_VALUE = 2;
153 #define DEFLATER_MARKER reinterpret_cast<void*>(DEFLATER_MARKER_VALUE)
154 public:
155 // NOTE: Typed as uintptr_t so that we can pick it up in SA, via vmStructs.
156 static const uintptr_t ANONYMOUS_OWNER = 1;
157 static const uintptr_t ANONYMOUS_OWNER_OR_DEFLATER_MARKER = ANONYMOUS_OWNER | DEFLATER_MARKER_VALUE;
158
159 private:
160 static void* anon_owner_ptr() { return reinterpret_cast<void*>(ANONYMOUS_OWNER); }
161
162 void* volatile _owner; // pointer to owning thread OR BasicLock
163 volatile uint64_t _previous_owner_tid; // thread id of the previous owner of the monitor
164 // Separate _owner and _next_om on different cache lines since
165 // both can have busy multi-threaded access. _previous_owner_tid is only
166 // changed by ObjectMonitor::exit() so it is a good choice to share the
167 // cache line with _owner.
168 DEFINE_PAD_MINUS_SIZE(1, OM_CACHE_LINE_SIZE, sizeof(void* volatile) +
169 sizeof(volatile uint64_t));
170 ObjectMonitor* _next_om; // Next ObjectMonitor* linkage
171 volatile intx _recursions; // recursion count, 0 for first entry
172 ObjectWaiter* volatile _EntryList; // Threads blocked on entry or reentry.
173 // The list is actually composed of WaitNodes,
174 // acting as proxies for Threads.
175
176 ObjectWaiter* volatile _cxq; // LL of recently-arrived threads blocked on entry.
177 JavaThread* volatile _succ; // Heir presumptive thread - used for futile wakeup throttling
198 // objects which can happen at normal VM shutdown.
199 //
200 #define OM_PERFDATA_OP(f, op_str) \
201 do { \
202 if (ObjectMonitor::_sync_ ## f != nullptr && \
203 PerfDataManager::has_PerfData()) { \
204 ObjectMonitor::_sync_ ## f->op_str; \
205 } \
206 } while (0)
207
208 static PerfCounter * _sync_ContendedLockAttempts;
209 static PerfCounter * _sync_FutileWakeups;
210 static PerfCounter * _sync_Parks;
211 static PerfCounter * _sync_Notifications;
212 static PerfCounter * _sync_Inflations;
213 static PerfCounter * _sync_Deflations;
214 static PerfLongVariable * _sync_MonExtant;
215
216 static int Knob_SpinLimit;
217
// Byte offsets of fields addressed directly by code that manipulates
// ObjectMonitor references by offset; see also the tag-stripping
// OM_OFFSET_NO_MONITOR_VALUE_TAG() macro below.
static ByteSize header_offset() { return byte_offset_of(ObjectMonitor, _header); }
static ByteSize owner_offset() { return byte_offset_of(ObjectMonitor, _owner); }
static ByteSize recursions_offset() { return byte_offset_of(ObjectMonitor, _recursions); }
static ByteSize cxq_offset() { return byte_offset_of(ObjectMonitor, _cxq); }
static ByteSize succ_offset() { return byte_offset_of(ObjectMonitor, _succ); }
static ByteSize EntryList_offset() { return byte_offset_of(ObjectMonitor, _EntryList); }
224
225 // ObjectMonitor references can be ORed with markWord::monitor_value
226 // as part of the ObjectMonitor tagging mechanism. When we combine an
227 // ObjectMonitor reference with an offset, we need to remove the tag
228 // value in order to generate the proper address.
229 //
230 // We can either adjust the ObjectMonitor reference and then add the
231 // offset or we can adjust the offset that is added to the ObjectMonitor
232 // reference. The latter avoids an AGI (Address Generation Interlock)
233 // stall so the helper macro adjusts the offset value that is returned
234 // to the ObjectMonitor reference manipulation code:
235 //
236 // Lightweight locking fetches ObjectMonitor references from a cache
237 // instead of the markWord and doesn't work with tagged values.
238 //
239 #define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
240 ((in_bytes(ObjectMonitor::f ## _offset())) - (LockingMode == LM_LIGHTWEIGHT ? 0 : checked_cast<int>(markWord::monitor_value)))
241
242 markWord header() const;
243 uintptr_t header_value() const;
244 volatile markWord* header_addr();
245 void set_header(markWord hdr);
246
247 // TODO[OMWorld]: Cleanup these names, the storage `_header` usage depends on the locking mode.
248 intptr_t hash_lightweight_locking() const;
249 void set_hash_lightweight_locking(intptr_t hash);
250
// Returns true if any thread appears to be interacting with this monitor:
// waiters, queued entrants (_cxq/_EntryList), a positive contention count,
// or a real owner. This is a lock-free snapshot of independently updated
// fields, so the result is advisory only.
bool is_busy() const {
  // TODO-FIXME: assert _owner == null implies _recursions = 0
  intptr_t ret_code = intptr_t(_waiters) | intptr_t(_cxq) | intptr_t(_EntryList);
  int cnts = contentions(); // read once
  if (cnts > 0) {
    // Only a positive contention count marks the monitor busy; a
    // non-positive value is ignored here.
    ret_code |= intptr_t(cnts);
  }
  if (!owner_is_DEFLATER_MARKER()) {
    // DEFLATER_MARKER in _owner belongs to async deflation, not to a
    // JavaThread, so it does not count as "busy".
    ret_code |= intptr_t(owner_raw());
  }
  return ret_code != 0;
}
263 bool is_contended() const {
264 intptr_t ret_code = intptr_t(_waiters) | intptr_t(_cxq) | intptr_t(_EntryList);
265 int cnts = contentions();
266 if (cnts > 0) {
267 ret_code |= intptr_t(cnts);
268 }
269 return ret_code != 0;
270 }
271 const char* is_busy_to_string(stringStream* ss);
272
273 bool is_entered(JavaThread* current) const;
274
275 // Returns true if this OM has an owner, false otherwise.
276 bool has_owner() const;
277 void* owner() const; // Returns null if DEFLATER_MARKER is observed.
278 void* owner_raw() const;
279 // Returns true if owner field == DEFLATER_MARKER and false otherwise.
280 bool owner_is_DEFLATER_MARKER() const;
281 // Returns true if 'this' is being async deflated and false otherwise.
282 bool is_being_async_deflated();
283 // Clear _owner field; current value must match old_value.
284 void release_clear_owner(void* old_value);
285 // Simply set _owner field to new_value; current value must match old_value.
286 void set_owner_from(void* old_value, void* new_value);
287 // Simply set _owner field to current; current value must match basic_lock_p.
288 void set_owner_from_BasicLock(void* basic_lock_p, JavaThread* current);
289 // Try to set _owner field to new_value if the current value matches
290 // old_value, using Atomic::cmpxchg(). Otherwise, does not change the
308 // Simply set _next_om field to new_value.
309 void set_next_om(ObjectMonitor* new_value);
310
// Number of waiting threads.
// NOTE(review): presumably backed by the _waiters field read in
// is_busy() - confirm against the out-of-line definition.
int waiters() const;

// Contention count; consulted by is_busy() and is_contended(), where
// only a positive value counts. add_to_contentions() adjusts it by a
// signed delta.
int contentions() const;
void add_to_contentions(int value);

// Recursion depth of the current owner; 0 for the first (outermost) entry.
intx recursions() const { return _recursions; }
void set_recursions(size_t recursions);
317
// JVM/TI GetObjectMonitorUsage() needs this:
// accessors for iterating the _WaitSet list of ObjectWaiter nodes and
// mapping a node back to its JavaThread.
ObjectWaiter* first_waiter() { return _WaitSet; }
ObjectWaiter* next_waiter(ObjectWaiter* o) { return o->_next; }
JavaThread* thread_of_waiter(ObjectWaiter* o) { return o->_thread; }
322
323 ObjectMonitor(oop object);
324 ~ObjectMonitor();
325
326 oop object() const;
327 oop object_peek() const;
328 bool object_is_cleared() const;
329 bool object_is_dead() const;
330 bool object_refers_to(oop obj) const;
331
332 // Returns true if the specified thread owns the ObjectMonitor. Otherwise
333 // returns false and throws IllegalMonitorStateException (IMSE).
334 bool check_owner(TRAPS);
335
336 private:
// Function object invoked with the current JavaThread; tracks one monitor
// and records in _om_exited whether it was exited.
// NOTE(review): operator() is defined out-of-line; per the name it
// appears to exit _om when the thread is suspended - confirm in the .cpp.
class ExitOnSuspend {
 protected:
  ObjectMonitor* _om;   // the monitor this helper operates on
  bool _om_exited;      // initialized false; reported by exited()
 public:
  ExitOnSuspend(ObjectMonitor* om) : _om(om), _om_exited(false) {}
  void operator()(JavaThread* current);
  bool exited() { return _om_exited; }
};
// Function object invoked with the current JavaThread; holds only the
// monitor it applies to.
// NOTE(review): operator() is defined out-of-line; per the name it
// appears to clear _succ when the thread is suspended - confirm in the .cpp.
class ClearSuccOnSuspend {
 protected:
  ObjectMonitor* _om;   // the monitor whose state the callback adjusts
 public:
  ClearSuccOnSuspend(ObjectMonitor* om) : _om(om) {}
  void operator()(JavaThread* current);
};
353 public:
354 bool try_enter(JavaThread* current);
355 bool enter_for(JavaThread* locking_thread);
356 bool enter(JavaThread* current);
357 void exit(JavaThread* current, bool not_suspended = true);
358 void wait(jlong millis, bool interruptible, TRAPS);
359 void notify(TRAPS);
360 void notifyAll(TRAPS);
361
362 void print() const;
363 #ifdef ASSERT
364 void print_debug_style_on(outputStream* st) const;
365 #endif
366 void print_on(outputStream* st) const;
367
368 // Use the following at your own risk
369 intx complete_exit(JavaThread* current);
370
371 private:
372 void AddWaiter(ObjectWaiter* waiter);
373 void INotify(JavaThread* current);
374 ObjectWaiter* DequeueWaiter();
375 void DequeueSpecificWaiter(ObjectWaiter* waiter);
376 void EnterI(JavaThread* current);
377 void ReenterI(JavaThread* current, ObjectWaiter* current_node);
378 void UnlinkAfterAcquire(JavaThread* current, ObjectWaiter* current_node);
379 int TryLock(JavaThread* current);
380 int TrySpin(JavaThread* current);
381 void ExitEpilog(JavaThread* current, ObjectWaiter* Wakee);
382
383 // Deflation support
384 bool deflate_monitor(Thread* current);
385 public:
386 bool deflate_anon_monitor(JavaThread* current);
387 private:
388 void install_displaced_markword_in_object(const oop obj);
389 };
390
391 // RAII object to ensure that ObjectMonitor::is_being_async_deflated() is
392 // stable within the context of this mark.
393 class ObjectMonitorContentionMark {
394 ObjectMonitor* _monitor;
395
396 public:
397 ObjectMonitorContentionMark(ObjectMonitor* monitor);
398 ~ObjectMonitorContentionMark();
399 };
400
401 #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP
|