< prev index next >

src/hotspot/share/runtime/lightweightSynchronizer.cpp

Print this page

  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/atomic.hpp"
  33 #include "runtime/basicLock.inline.hpp"
  34 #include "runtime/globals_extension.hpp"
  35 #include "runtime/interfaceSupport.inline.hpp"
  36 #include "runtime/javaThread.inline.hpp"
  37 #include "runtime/lightweightSynchronizer.hpp"
  38 #include "runtime/lockStack.inline.hpp"
  39 #include "runtime/mutexLocker.hpp"
  40 #include "runtime/objectMonitor.inline.hpp"
  41 #include "runtime/os.hpp"
  42 #include "runtime/safepointMechanism.inline.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/synchronizer.inline.hpp"
  45 #include "runtime/timerTrace.hpp"
  46 #include "runtime/trimNativeHeap.hpp"
  47 #include "utilities/concurrentHashTable.inline.hpp"
  48 #include "utilities/concurrentHashTableTasks.inline.hpp"
  49 #include "utilities/globalDefinitions.hpp"
  50 












  51 // ConcurrentHashTable storing links from objects to ObjectMonitors
  52 class ObjectMonitorTable : AllStatic {
  53   struct Config {
  54     using Value = ObjectMonitor*;
         // Table hash of an entry is the identity hash cached in the monitor
         // itself (installed by LightweightSynchronizer::add_monitor).
  55     static uintx get_hash(Value const& value, bool* is_dead) {
  56       return (uintx)value->hash();
  57     }
         // Node allocate/free also keep _items_count in sync; the count feeds the
         // load-factor based grow/clean heuristics.
  58     static void* allocate_node(void* context, size_t size, Value const& value) {
  59       ObjectMonitorTable::inc_items_count();
  60       return AllocateHeap(size, mtObjectMonitor);
  61     };
  62     static void free_node(void* context, void* memory, Value const& value) {
  63       ObjectMonitorTable::dec_items_count();
  64       FreeHeap(memory);
  65     }
  66   };
  67   using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;
  68 
  69   static ConcurrentTable* _table;
  70   static volatile size_t _items_count;
  71   static size_t _table_size;
  72   static volatile bool _resize;
  73 
       // CHT lookup functor: matches the table entry whose ObjectMonitor refers
       // to the given object.
  74   class Lookup : public StackObj {
  75     oop _obj;
  76 
  77    public:
  78     explicit Lookup(oop obj) : _obj(obj) {}
  79 
       // The object's identity hash; must already be installed in the mark word.
  80     uintx get_hash() const {
  81       uintx hash = _obj->mark().hash();
  82       assert(hash != 0, "should have a hash");
  83       return hash;
  84     }
  85 
  86     bool equals(ObjectMonitor** value) {
  87       assert(*value != nullptr, "must be");
  88       return (*value)->object_refers_to(_obj);
  89     }
  90 
       // Entries are never considered dead by this lookup; removal is explicit
       // (see remove_monitor_entry).
  91     bool is_dead(ObjectMonitor** value) {
  92       assert(*value != nullptr, "must be");
  93       return false;
  94     }
  95   };
  96 
  97   class LookupMonitor : public StackObj {
  98     ObjectMonitor* _monitor;
  99 
 100    public:
 101     explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}
 102 
 103     uintx get_hash() const {

 266       success = grow(current);
 267     } else {
 268       if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
 269         lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
 270       }
 271       lt.print("Start cleaning with load factor %f", get_load_factor());
 272       success = clean(current);
 273     }
 274 
 275     Atomic::store(&_resize, false);
 276 
 277     return success;
 278   }
 279 
       // Insert-or-get: if an entry for obj already exists, return that entry's
       // monitor; otherwise insert `monitor` and return it.
 280   static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
 281     // Enter the monitor into the concurrent hashtable.
 282     ObjectMonitor* result = monitor;
 283     Lookup lookup_f(obj);
       // Runs only when an existing entry won the race; keep that monitor.
 284     auto found_f = [&](ObjectMonitor** found) {
 285       assert((*found)->object_peek() == obj, "must be");

 286       result = *found;
 287     };
 288     bool grow;
 289     _table->insert_get(current, lookup_f, monitor, found_f, &grow);
 290     verify_monitor_get_result(obj, result);
       // `grow` is the table's resize hint after this insertion.
 291     if (grow) {
 292       try_notify_grow();
 293     }
 294     return result;
 295   }
 296 
       // Unlink the entry for this exact monitor; returns whether one was removed.
 297   static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
 298     LookupMonitor lookup_f(monitor);
 299     return _table->remove(current, lookup_f);
 300   }
 301 
       // Membership test: true iff this exact monitor is currently in the table.
 302   static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
 303     LookupMonitor lookup_f(monitor);
 304     bool result = false;
 305     auto found_f = [&](ObjectMonitor** found) {
 306       result = true;
 307     };
 308     _table->get(current, lookup_f, found_f);
 309     return result;
 310   }
 311 
       // Debug dump of every (monitor, object) pair in the table.
 312   static void print_on(outputStream* st) {
 313     auto printer = [&] (ObjectMonitor** entry) {
 314        ObjectMonitor* om = *entry;
 315        oop obj = om->object_peek();
 316        st->print("monitor=" PTR_FORMAT ", ", p2i(om));
 317        st->print("object=" PTR_FORMAT, p2i(obj));
       // NOTE(review): assumes obj is non-null for every live entry during this
       // scan -- confirm a concurrently cleared entry cannot be observed here.
 318        assert(obj->mark().hash() == om->hash(), "hash must match");
 319        st->cr();
 320        return true;
 321     };
       // At a safepoint we can scan without the concurrent-access protocol.
 322     if (SafepointSynchronize::is_at_safepoint()) {
 323       _table->do_safepoint_scan(printer);
 324     } else {
 325       _table->do_scan(Thread::current(), printer);
 326     }
 327   }
 328 };
 329 
 330 ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
 331 volatile size_t ObjectMonitorTable::_items_count = 0;
 332 size_t ObjectMonitorTable::_table_size = 0;
 333 volatile bool ObjectMonitorTable::_resize = false;
 334 
 335 ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
 336   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 337 
 338   ObjectMonitor* monitor = get_monitor_from_table(current, object);

 388   ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);
 389 
 390   if (inserted) {
 391     log_inflate(current, object, cause);
 392     if (event.should_commit()) {
 393       post_monitor_inflate_event(&event, object, cause);
 394     }
 395 
 396     // The monitor has an anonymous owner so it is safe from async deflation.
 397     ObjectSynchronizer::_in_use_list.add(monitor);
 398   }
 399 
 400   return monitor;
 401 }
 402 
 403 // Add the hashcode to the monitor to match the object and put it in the hashtable.
     // Cache the object's identity hash in the monitor (so the table's
     // Config::get_hash can use it without touching the object), then
     // insert-or-get the monitor in the hashtable.
 404 ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
 405   assert(UseObjectMonitorTable, "must be");
 406   assert(obj == monitor->object(), "must be");
 407 
 408   intptr_t hash = obj->mark().hash();
 409   assert(hash != 0, "must be set when claiming the object monitor");
 410   monitor->set_hash(hash);
 411 
 412   return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
 413 }
 414 
     // Remove the table entry mapping obj to this monitor; returns whether an
     // entry was removed.
 415 bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
 416   assert(UseObjectMonitorTable, "must be");
 417   assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");
 418 
 419   return ObjectMonitorTable::remove_monitor_entry(current, monitor);
 420 }
 421 
 422 void LightweightSynchronizer::deflate_mark_word(oop obj) {
 423   assert(UseObjectMonitorTable, "must be");
 424 
 425   markWord mark = obj->mark_acquire();
 426   assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");
 427 
 428   while (mark.has_monitor()) {

1224       return false;
1225     }
1226 
1227     if (UseObjectMonitorTable) {
1228       // Set the monitor regardless of success.
1229       // Either we successfully lock on the monitor, or we retry with the
1230       // monitor in the slow path. If the monitor gets deflated, it will be
1231       // cleared, either by the CacheSetter if we fast lock in enter or in
1232       // inflate_and_enter when we see that the monitor is deflated.
1233       lock->set_object_monitor_cache(monitor);
1234     }
1235 
1236     if (monitor->spin_enter(current)) {
1237       return true;
1238     }
1239   }
1240 
1241   // Slow-path.
1242   return false;
1243 }



















  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/atomic.hpp"
  33 #include "runtime/basicLock.inline.hpp"
  34 #include "runtime/globals_extension.hpp"
  35 #include "runtime/interfaceSupport.inline.hpp"
  36 #include "runtime/javaThread.inline.hpp"
  37 #include "runtime/lightweightSynchronizer.hpp"
  38 #include "runtime/lockStack.inline.hpp"
  39 #include "runtime/mutexLocker.hpp"
  40 #include "runtime/objectMonitor.inline.hpp"
  41 #include "runtime/os.hpp"
  42 #include "runtime/safepointMechanism.inline.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/synchronizer.inline.hpp"
  45 #include "runtime/timerTrace.hpp"
  46 #include "runtime/trimNativeHeap.hpp"
  47 #include "utilities/concurrentHashTable.inline.hpp"
  48 #include "utilities/concurrentHashTableTasks.inline.hpp"
  49 #include "utilities/globalDefinitions.hpp"
  50 
  51 static uintx objhash(oop obj) {
  52   if (UseCompactObjectHeaders) {
  53     uintx hash = LightweightSynchronizer::get_hash(obj->mark(), obj);
  54     assert(hash != 0, "should have a hash");
  55     return hash;
  56   } else {
  57     uintx hash = obj->mark().hash();
  58     assert(hash != 0, "should have a hash");
  59     return hash;
  60   }
  61 }
  62 
  63 // ConcurrentHashTable storing links from objects to ObjectMonitors
  64 class ObjectMonitorTable : AllStatic {
  65   struct Config {
  66     using Value = ObjectMonitor*;
         // Table hash of an entry is the identity hash cached in the monitor
         // itself (installed by LightweightSynchronizer::add_monitor).
  67     static uintx get_hash(Value const& value, bool* is_dead) {
  68       return (uintx)value->hash();
  69     }
         // Node allocate/free also keep _items_count in sync; the count feeds the
         // load-factor based grow/clean heuristics.
  70     static void* allocate_node(void* context, size_t size, Value const& value) {
  71       ObjectMonitorTable::inc_items_count();
  72       return AllocateHeap(size, mtObjectMonitor);
  73     };
  74     static void free_node(void* context, void* memory, Value const& value) {
  75       ObjectMonitorTable::dec_items_count();
  76       FreeHeap(memory);
  77     }
  78   };
  79   using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;
  80 
  81   static ConcurrentTable* _table;
  82   static volatile size_t _items_count;
  83   static size_t _table_size;
  84   static volatile bool _resize;
  85 
       // CHT lookup functor: matches the table entry whose ObjectMonitor refers
       // to the given object.
  86   class Lookup : public StackObj {
  87     oop _obj;
  88 
  89    public:
  90     explicit Lookup(oop obj) : _obj(obj) {}
  91 
       // The object's identity hash (layout-aware; see objhash()).
  92     uintx get_hash() const {
  93       return objhash(_obj);


  94     }
  95 
  96     bool equals(ObjectMonitor** value) {
  97       assert(*value != nullptr, "must be");
  98       return (*value)->object_refers_to(_obj);
  99     }
 100 
       // Entries are never considered dead by this lookup; removal is explicit
       // (see remove_monitor_entry).
 101     bool is_dead(ObjectMonitor** value) {
 102       assert(*value != nullptr, "must be");
 103       return false;
 104     }
 105   };
 106 
 107   class LookupMonitor : public StackObj {
 108     ObjectMonitor* _monitor;
 109 
 110    public:
 111     explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}
 112 
 113     uintx get_hash() const {

 276       success = grow(current);
 277     } else {
 278       if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
 279         lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
 280       }
 281       lt.print("Start cleaning with load factor %f", get_load_factor());
 282       success = clean(current);
 283     }
 284 
 285     Atomic::store(&_resize, false);
 286 
 287     return success;
 288   }
 289 
       // Insert-or-get: if an entry for obj already exists, return that entry's
       // monitor; otherwise insert `monitor` and return it.
 290   static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
 291     // Enter the monitor into the concurrent hashtable.
 292     ObjectMonitor* result = monitor;
 293     Lookup lookup_f(obj);
       // Runs only when an existing entry won the race; keep that monitor.
 294     auto found_f = [&](ObjectMonitor** found) {
 295       assert((*found)->object_peek() == obj, "must be");
 296       assert(objhash(obj) == (uintx)(*found)->hash(), "hash must match");
 297       result = *found;
 298     };
 299     bool grow;
 300     _table->insert_get(current, lookup_f, monitor, found_f, &grow);
 301     verify_monitor_get_result(obj, result);
       // `grow` is the table's resize hint after this insertion.
 302     if (grow) {
 303       try_notify_grow();
 304     }
 305     return result;
 306   }
 307 
       // Unlink the entry for this exact monitor; returns whether one was removed.
 308   static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
 309     LookupMonitor lookup_f(monitor);
 310     return _table->remove(current, lookup_f);
 311   }
 312 
       // Membership test: true iff this exact monitor is currently in the table.
 313   static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
 314     LookupMonitor lookup_f(monitor);
 315     bool result = false;
 316     auto found_f = [&](ObjectMonitor** found) {
 317       result = true;
 318     };
 319     _table->get(current, lookup_f, found_f);
 320     return result;
 321   }
 322 
       // Debug dump of every (monitor, object) pair in the table.
 323   static void print_on(outputStream* st) {
 324     auto printer = [&] (ObjectMonitor** entry) {
 325        ObjectMonitor* om = *entry;
 326        oop obj = om->object_peek();
 327        st->print("monitor=" PTR_FORMAT ", ", p2i(om));
 328        st->print("object=" PTR_FORMAT, p2i(obj));
       // NOTE(review): assumes obj is non-null for every live entry during this
       // scan -- confirm a concurrently cleared entry cannot be observed here.
 329        assert(objhash(obj) == (uintx)om->hash(), "hash must match");
 330        st->cr();
 331        return true;
 332     };
       // At a safepoint we can scan without the concurrent-access protocol.
 333     if (SafepointSynchronize::is_at_safepoint()) {
 334       _table->do_safepoint_scan(printer);
 335     } else {
 336       _table->do_scan(Thread::current(), printer);
 337     }
 338   }
 339 };
 340 
 341 ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
 342 volatile size_t ObjectMonitorTable::_items_count = 0;
 343 size_t ObjectMonitorTable::_table_size = 0;
 344 volatile bool ObjectMonitorTable::_resize = false;
 345 
 346 ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
 347   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 348 
 349   ObjectMonitor* monitor = get_monitor_from_table(current, object);

 399   ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);
 400 
 401   if (inserted) {
 402     log_inflate(current, object, cause);
 403     if (event.should_commit()) {
 404       post_monitor_inflate_event(&event, object, cause);
 405     }
 406 
 407     // The monitor has an anonymous owner so it is safe from async deflation.
 408     ObjectSynchronizer::_in_use_list.add(monitor);
 409   }
 410 
 411   return monitor;
 412 }
 413 
 414 // Add the hashcode to the monitor to match the object and put it in the hashtable.
     // Cache the object's identity hash in the monitor (so the table's
     // Config::get_hash can use it without touching the object), then
     // insert-or-get the monitor in the hashtable.
 415 ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
 416   assert(UseObjectMonitorTable, "must be");
 417   assert(obj == monitor->object(), "must be");
 418 
 419   intptr_t hash = objhash(obj);
 420   assert(hash != 0, "must be set when claiming the object monitor");
 421   monitor->set_hash(hash);
 422 
 423   return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
 424 }
 425 
     // Remove the table entry mapping obj to this monitor; returns whether an
     // entry was removed.
 426 bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
 427   assert(UseObjectMonitorTable, "must be");
 428   assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");
 429 
 430   return ObjectMonitorTable::remove_monitor_entry(current, monitor);
 431 }
 432 
 433 void LightweightSynchronizer::deflate_mark_word(oop obj) {
 434   assert(UseObjectMonitorTable, "must be");
 435 
 436   markWord mark = obj->mark_acquire();
 437   assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");
 438 
 439   while (mark.has_monitor()) {

1235       return false;
1236     }
1237 
1238     if (UseObjectMonitorTable) {
1239       // Set the monitor regardless of success.
1240       // Either we successfully lock on the monitor, or we retry with the
1241       // monitor in the slow path. If the monitor gets deflated, it will be
1242       // cleared, either by the CacheSetter if we fast lock in enter or in
1243       // inflate_and_enter when we see that the monitor is deflated.
1244       lock->set_object_monitor_cache(monitor);
1245     }
1246 
1247     if (monitor->spin_enter(current)) {
1248       return true;
1249     }
1250   }
1251 
1252   // Slow-path.
1253   return false;
1254 }
1255 
     // Identity hash for obj under compact object headers. If the hash field
     // has already been materialized in the object ("hashed_expanded"), read it
     // from the klass-provided offset; otherwise recompute it, which is only
     // sound when the configured hash function is idempotent.
1256 uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj, Klass* klass) {
1257   assert(UseCompactObjectHeaders, "Only with compact i-hash");
     // Disabled check; note it should use '||', not bitwise '|', if re-enabled:
1258   //assert(mark.is_neutral() || mark.is_fast_locked(), "only from neutral or fast-locked mark: " INTPTR_FORMAT, mark.value());
1259   assert(mark.is_hashed(), "only from hashed or copied object");
1260   if (mark.is_hashed_expanded()) {
     // Hash is stored as an int field inside the object.
1261     return obj->int_field(klass->hash_offset_in_bytes(obj, mark));
1262   } else {
1263     assert(mark.is_hashed_not_expanded(), "must be hashed");
     // hashCode modes 2 and 6 are the idempotent ones, so recomputation below
     // yields the same value every time.
1264     assert(hashCode == 6 || hashCode == 2, "must have idempotent hashCode");
1265     // Already marked as hashed, but not yet copied. Recompute hash and return it.
1266     return ObjectSynchronizer::get_next_hash(nullptr, obj); // recompute hash
1267   }
1268 }
1269 
     // Convenience overload: derive the Klass from the mark word.
1270 uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj) {
1271   return get_hash(mark, obj, mark.klass());
1272 }
< prev index next >