34 #include "runtime/atomic.hpp"
35 #include "runtime/basicLock.inline.hpp"
36 #include "runtime/globals_extension.hpp"
37 #include "runtime/interfaceSupport.inline.hpp"
38 #include "runtime/javaThread.inline.hpp"
39 #include "runtime/lightweightSynchronizer.hpp"
40 #include "runtime/lockStack.inline.hpp"
41 #include "runtime/mutexLocker.hpp"
42 #include "runtime/objectMonitor.inline.hpp"
43 #include "runtime/os.hpp"
44 #include "runtime/perfData.inline.hpp"
45 #include "runtime/safepointMechanism.inline.hpp"
46 #include "runtime/safepointVerifiers.hpp"
47 #include "runtime/synchronizer.inline.hpp"
48 #include "runtime/timerTrace.hpp"
49 #include "runtime/trimNativeHeap.hpp"
50 #include "utilities/concurrentHashTable.inline.hpp"
51 #include "utilities/concurrentHashTableTasks.inline.hpp"
52 #include "utilities/globalDefinitions.hpp"
53
54 // ConcurrentHashTable storing links from objects to ObjectMonitors
55 class ObjectMonitorTable : AllStatic {
  // Configuration for the ConcurrentHashTable mapping objects to their
  // ObjectMonitors.  The entry's key hash is the identity hash cached in
  // the monitor itself, so no object access is needed to rehash.
  struct Config {
    using Value = ObjectMonitor*;
    static uintx get_hash(Value const& value, bool* is_dead) {
      return (uintx)value->hash();
    }
    // Node allocation/deallocation also maintains the global item count
    // used by the resize/cleanup heuristics.
    static void* allocate_node(void* context, size_t size, Value const& value) {
      ObjectMonitorTable::inc_items_count();
      return AllocateHeap(size, mtObjectMonitor);
    };
    static void free_node(void* context, void* memory, Value const& value) {
      ObjectMonitorTable::dec_items_count();
      FreeHeap(memory);
    }
  };
70 using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;
71
72 static ConcurrentTable* _table;
73 static volatile size_t _items_count;
74 static size_t _table_size;
75 static volatile bool _resize;
76
  // Lookup adapter used by the table to find a monitor entry by its
  // associated Java object.  The member set (get_hash/equals/is_dead) is
  // the interface required by ConcurrentHashTable.
  class Lookup : public StackObj {
    oop _obj;

   public:
    explicit Lookup(oop obj) : _obj(obj) {}

    // Key hash: the object's identity hash, which must already be set.
    uintx get_hash() const {
      uintx hash = _obj->mark().hash();
      assert(hash != 0, "should have a hash");
      return hash;
    }

    bool equals(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    // Entries are never considered dead from the lookup side.
    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return false;
    }
  };
99
100 class LookupMonitor : public StackObj {
101 ObjectMonitor* _monitor;
102
103 public:
104 explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}
105
106 uintx get_hash() const {
268 success = grow(current);
269 } else {
270 if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
271 lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
272 }
273 lt.print("Start cleaning with load factor %f", get_load_factor());
274 success = clean(current);
275 }
276
277 Atomic::store(&_resize, false);
278
279 return success;
280 }
281
282 static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
283 // Enter the monitor into the concurrent hashtable.
284 ObjectMonitor* result = monitor;
285 Lookup lookup_f(obj);
286 auto found_f = [&](ObjectMonitor** found) {
287 assert((*found)->object_peek() == obj, "must be");
288 result = *found;
289 };
290 bool grow;
291 _table->insert_get(current, lookup_f, monitor, found_f, &grow);
292 verify_monitor_get_result(obj, result);
293 if (grow) {
294 try_notify_grow();
295 }
296 return result;
297 }
298
299 static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
300 LookupMonitor lookup_f(monitor);
301 return _table->remove(current, lookup_f);
302 }
303
304 static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
305 LookupMonitor lookup_f(monitor);
306 bool result = false;
307 auto found_f = [&](ObjectMonitor** found) {
308 result = true;
309 };
310 _table->get(current, lookup_f, found_f);
311 return result;
312 }
313
314 static void print_on(outputStream* st) {
315 auto printer = [&] (ObjectMonitor** entry) {
316 ObjectMonitor* om = *entry;
317 oop obj = om->object_peek();
318 st->print("monitor=" PTR_FORMAT ", ", p2i(om));
319 st->print("object=" PTR_FORMAT, p2i(obj));
320 assert(obj->mark().hash() == om->hash(), "hash must match");
321 st->cr();
322 return true;
323 };
324 if (SafepointSynchronize::is_at_safepoint()) {
325 _table->do_safepoint_scan(printer);
326 } else {
327 _table->do_scan(Thread::current(), printer);
328 }
329 }
330 };
331
// Storage and initial values for ObjectMonitorTable statics.
ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
volatile size_t ObjectMonitorTable::_items_count = 0;
size_t ObjectMonitorTable::_table_size = 0;
volatile bool ObjectMonitorTable::_resize = false;
336
337 ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
338 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
339
340 ObjectMonitor* monitor = get_monitor_from_table(current, object);
389 // Hopefully the performance counters are allocated on distinct
390 // cache lines to avoid false sharing on MP systems ...
391 OM_PERFDATA_OP(Inflations, inc());
392 log_inflate(current, object, cause);
393 if (event.should_commit()) {
394 post_monitor_inflate_event(&event, object, cause);
395 }
396
397 // The monitor has an anonymous owner so it is safe from async deflation.
398 ObjectSynchronizer::_in_use_list.add(monitor);
399 }
400
401 return monitor;
402 }
403
404 // Add the hashcode to the monitor to match the object and put it in the hashtable.
405 ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
406 assert(UseObjectMonitorTable, "must be");
407 assert(obj == monitor->object(), "must be");
408
409 intptr_t hash = obj->mark().hash();
410 assert(hash != 0, "must be set when claiming the object monitor");
411 monitor->set_hash(hash);
412
413 return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
414 }
415
416 bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
417 assert(UseObjectMonitorTable, "must be");
418 assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");
419
420 return ObjectMonitorTable::remove_monitor_entry(current, monitor);
421 }
422
423 void LightweightSynchronizer::deflate_mark_word(oop obj) {
424 assert(UseObjectMonitorTable, "must be");
425
426 markWord mark = obj->mark_acquire();
427 assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");
428
429 while (mark.has_monitor()) {
1200
1201 if (mark.has_monitor()) {
1202 ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) :
1203 ObjectSynchronizer::read_monitor(mark);
1204
1205 if (monitor == nullptr) {
1206 // Take the slow-path on a cache miss.
1207 return false;
1208 }
1209
1210 if (monitor->try_enter(current)) {
1211 // ObjectMonitor enter successful.
1212 cache_setter.set_monitor(monitor);
1213 return true;
1214 }
1215 }
1216
1217 // Slow-path.
1218 return false;
1219 }
|
34 #include "runtime/atomic.hpp"
35 #include "runtime/basicLock.inline.hpp"
36 #include "runtime/globals_extension.hpp"
37 #include "runtime/interfaceSupport.inline.hpp"
38 #include "runtime/javaThread.inline.hpp"
39 #include "runtime/lightweightSynchronizer.hpp"
40 #include "runtime/lockStack.inline.hpp"
41 #include "runtime/mutexLocker.hpp"
42 #include "runtime/objectMonitor.inline.hpp"
43 #include "runtime/os.hpp"
44 #include "runtime/perfData.inline.hpp"
45 #include "runtime/safepointMechanism.inline.hpp"
46 #include "runtime/safepointVerifiers.hpp"
47 #include "runtime/synchronizer.inline.hpp"
48 #include "runtime/timerTrace.hpp"
49 #include "runtime/trimNativeHeap.hpp"
50 #include "utilities/concurrentHashTable.inline.hpp"
51 #include "utilities/concurrentHashTableTasks.inline.hpp"
52 #include "utilities/globalDefinitions.hpp"
53
54 static uintx objhash(oop obj) {
55 if (UseCompactObjectHeaders) {
56 uintx hash = LightweightSynchronizer::get_hash(obj->mark(), obj);
57 assert(hash != 0, "should have a hash");
58 return hash;
59 } else {
60 uintx hash = obj->mark().hash();
61 assert(hash != 0, "should have a hash");
62 return hash;
63 }
64 }
65
66 // ConcurrentHashTable storing links from objects to ObjectMonitors
67 class ObjectMonitorTable : AllStatic {
  // Configuration for the ConcurrentHashTable mapping objects to their
  // ObjectMonitors.  The entry's key hash is the identity hash cached in
  // the monitor itself, so no object access is needed to rehash.
  struct Config {
    using Value = ObjectMonitor*;
    static uintx get_hash(Value const& value, bool* is_dead) {
      return (uintx)value->hash();
    }
    // Node allocation/deallocation also maintains the global item count
    // used by the resize/cleanup heuristics.
    static void* allocate_node(void* context, size_t size, Value const& value) {
      ObjectMonitorTable::inc_items_count();
      return AllocateHeap(size, mtObjectMonitor);
    };
    static void free_node(void* context, void* memory, Value const& value) {
      ObjectMonitorTable::dec_items_count();
      FreeHeap(memory);
    }
  };
82 using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;
83
84 static ConcurrentTable* _table;
85 static volatile size_t _items_count;
86 static size_t _table_size;
87 static volatile bool _resize;
88
  // Lookup adapter used by the table to find a monitor entry by its
  // associated Java object.  The member set (get_hash/equals/is_dead) is
  // the interface required by ConcurrentHashTable.
  class Lookup : public StackObj {
    oop _obj;

   public:
    explicit Lookup(oop obj) : _obj(obj) {}

    // Key hash: the object's identity hash (see objhash()); works with and
    // without compact object headers.
    uintx get_hash() const {
      return objhash(_obj);
    }

    bool equals(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    // Entries are never considered dead from the lookup side.
    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return false;
    }
  };
109
110 class LookupMonitor : public StackObj {
111 ObjectMonitor* _monitor;
112
113 public:
114 explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}
115
116 uintx get_hash() const {
278 success = grow(current);
279 } else {
280 if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
281 lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
282 }
283 lt.print("Start cleaning with load factor %f", get_load_factor());
284 success = clean(current);
285 }
286
287 Atomic::store(&_resize, false);
288
289 return success;
290 }
291
292 static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
293 // Enter the monitor into the concurrent hashtable.
294 ObjectMonitor* result = monitor;
295 Lookup lookup_f(obj);
296 auto found_f = [&](ObjectMonitor** found) {
297 assert((*found)->object_peek() == obj, "must be");
298 assert(objhash(obj) == (uintx)(*found)->hash(), "hash must match");
299 result = *found;
300 };
301 bool grow;
302 _table->insert_get(current, lookup_f, monitor, found_f, &grow);
303 verify_monitor_get_result(obj, result);
304 if (grow) {
305 try_notify_grow();
306 }
307 return result;
308 }
309
310 static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
311 LookupMonitor lookup_f(monitor);
312 return _table->remove(current, lookup_f);
313 }
314
315 static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
316 LookupMonitor lookup_f(monitor);
317 bool result = false;
318 auto found_f = [&](ObjectMonitor** found) {
319 result = true;
320 };
321 _table->get(current, lookup_f, found_f);
322 return result;
323 }
324
325 static void print_on(outputStream* st) {
326 auto printer = [&] (ObjectMonitor** entry) {
327 ObjectMonitor* om = *entry;
328 oop obj = om->object_peek();
329 st->print("monitor=" PTR_FORMAT ", ", p2i(om));
330 st->print("object=" PTR_FORMAT, p2i(obj));
331 assert(objhash(obj) == (uintx)om->hash(), "hash must match");
332 st->cr();
333 return true;
334 };
335 if (SafepointSynchronize::is_at_safepoint()) {
336 _table->do_safepoint_scan(printer);
337 } else {
338 _table->do_scan(Thread::current(), printer);
339 }
340 }
341 };
342
// Storage and initial values for ObjectMonitorTable statics.
ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
volatile size_t ObjectMonitorTable::_items_count = 0;
size_t ObjectMonitorTable::_table_size = 0;
volatile bool ObjectMonitorTable::_resize = false;
347
348 ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
349 assert(LockingMode == LM_LIGHTWEIGHT, "must be");
350
351 ObjectMonitor* monitor = get_monitor_from_table(current, object);
400 // Hopefully the performance counters are allocated on distinct
401 // cache lines to avoid false sharing on MP systems ...
402 OM_PERFDATA_OP(Inflations, inc());
403 log_inflate(current, object, cause);
404 if (event.should_commit()) {
405 post_monitor_inflate_event(&event, object, cause);
406 }
407
408 // The monitor has an anonymous owner so it is safe from async deflation.
409 ObjectSynchronizer::_in_use_list.add(monitor);
410 }
411
412 return monitor;
413 }
414
415 // Add the hashcode to the monitor to match the object and put it in the hashtable.
416 ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
417 assert(UseObjectMonitorTable, "must be");
418 assert(obj == monitor->object(), "must be");
419
420 intptr_t hash = objhash(obj);
421 assert(hash != 0, "must be set when claiming the object monitor");
422 monitor->set_hash(hash);
423
424 return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
425 }
426
427 bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
428 assert(UseObjectMonitorTable, "must be");
429 assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");
430
431 return ObjectMonitorTable::remove_monitor_entry(current, monitor);
432 }
433
434 void LightweightSynchronizer::deflate_mark_word(oop obj) {
435 assert(UseObjectMonitorTable, "must be");
436
437 markWord mark = obj->mark_acquire();
438 assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");
439
440 while (mark.has_monitor()) {
1211
1212 if (mark.has_monitor()) {
1213 ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) :
1214 ObjectSynchronizer::read_monitor(mark);
1215
1216 if (monitor == nullptr) {
1217 // Take the slow-path on a cache miss.
1218 return false;
1219 }
1220
1221 if (monitor->try_enter(current)) {
1222 // ObjectMonitor enter successful.
1223 cache_setter.set_monitor(monitor);
1224 return true;
1225 }
1226 }
1227
1228 // Slow-path.
1229 return false;
1230 }
1231
// Return the identity hash of obj under compact object headers, where the
// hash may live in the object body (after expansion) instead of the mark
// word.  klass is passed explicitly so callers that already know it avoid
// re-deriving it from the mark.
uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj, Klass* klass) {
  assert(UseCompactObjectHeaders, "Only with compact i-hash");
  //assert(mark.is_neutral() | mark.is_fast_locked(), "only from neutral or fast-locked mark: " INTPTR_FORMAT, mark.value());
  assert(mark.is_hashed(), "only from hashed or copied object");
  if (mark.is_hashed_expanded()) {
    // Object has been expanded to carry its hash: read it from the field.
    return obj->int_field(klass->hash_offset_in_bytes(obj));
  } else {
    assert(mark.is_hashed_not_expanded(), "must be hashed");
    // Recomputing is only valid for hashCode modes whose result depends
    // solely on the object (idempotent), not on random state.
    assert(hashCode == 6 || hashCode == 2, "must have idempotent hashCode");
    // Already marked as hashed, but not yet copied. Recompute hash and return it.
    return ObjectSynchronizer::get_next_hash(nullptr, obj); // recompute hash
  }
}
1245
1246 uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj) {
1247 return get_hash(mark, obj, mark.klass());
1248 }
|