
src/hotspot/share/runtime/lightweightSynchronizer.cpp


  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/atomic.hpp"
  33 #include "runtime/basicLock.inline.hpp"
  34 #include "runtime/globals_extension.hpp"
  35 #include "runtime/interfaceSupport.inline.hpp"
  36 #include "runtime/javaThread.inline.hpp"
  37 #include "runtime/lightweightSynchronizer.hpp"
  38 #include "runtime/lockStack.inline.hpp"
  39 #include "runtime/mutexLocker.hpp"
  40 #include "runtime/objectMonitor.inline.hpp"
  41 #include "runtime/os.hpp"
  42 #include "runtime/safepointMechanism.inline.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/synchronizer.inline.hpp"
  45 #include "runtime/timerTrace.hpp"
  46 #include "runtime/trimNativeHeap.hpp"
  47 #include "utilities/concurrentHashTable.inline.hpp"
  48 #include "utilities/concurrentHashTableTasks.inline.hpp"
  49 #include "utilities/globalDefinitions.hpp"
  50 
  51 // ConcurrentHashTable storing links from objects to ObjectMonitors
  52 class ObjectMonitorTable : AllStatic {
  53   struct Config {
  54     using Value = ObjectMonitor*;
  55     static uintx get_hash(Value const& value, bool* is_dead) {
  56       return (uintx)value->hash();
  57     }
  58     static void* allocate_node(void* context, size_t size, Value const& value) {
  59       ObjectMonitorTable::inc_items_count();
  60       return AllocateHeap(size, mtObjectMonitor);
  61     };
  62     static void free_node(void* context, void* memory, Value const& value) {
  63       ObjectMonitorTable::dec_items_count();
  64       FreeHeap(memory);
  65     }
  66   };
  67   using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;
  68 
  69   static ConcurrentTable* _table;
  70   static volatile size_t _items_count;
  71   static size_t _table_size;
  72   static volatile bool _resize;
  73 
  74   class Lookup : public StackObj {
  75     oop _obj;
  76 
  77    public:
  78     explicit Lookup(oop obj) : _obj(obj) {}
  79 
  80     uintx get_hash() const {
  81       uintx hash = _obj->mark().hash();
  82       assert(hash != 0, "should have a hash");
  83       return hash;
  84     }
  85 
  86     bool equals(ObjectMonitor** value) {
  87       assert(*value != nullptr, "must be");
  88       return (*value)->object_refers_to(_obj);
  89     }
  90 
  91     bool is_dead(ObjectMonitor** value) {
  92       assert(*value != nullptr, "must be");
  93       return false;
  94     }
  95   };
  96 
  97   class LookupMonitor : public StackObj {
  98     ObjectMonitor* _monitor;
  99 
 100    public:
 101     explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}
 102 
 103     uintx get_hash() const {

 265       success = grow(current);
 266     } else {
 267       if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
 268         lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
 269       }
 270       lt.print("Start cleaning with load factor %f", get_load_factor());
 271       success = clean(current);
 272     }
 273 
 274     Atomic::store(&_resize, false);
 275 
 276     return success;
 277   }
 278 
 279   static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
 280     // Enter the monitor into the concurrent hashtable.
 281     ObjectMonitor* result = monitor;
 282     Lookup lookup_f(obj);
 283     auto found_f = [&](ObjectMonitor** found) {
 284       assert((*found)->object_peek() == obj, "must be");
 285       result = *found;
 286     };
 287     bool grow;
 288     _table->insert_get(current, lookup_f, monitor, found_f, &grow);
 289     verify_monitor_get_result(obj, result);
 290     if (grow) {
 291       try_notify_grow();
 292     }
 293     return result;
 294   }
 295 
 296   static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
 297     LookupMonitor lookup_f(monitor);
 298     return _table->remove(current, lookup_f);
 299   }
 300 
 301   static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
 302     LookupMonitor lookup_f(monitor);
 303     bool result = false;
 304     auto found_f = [&](ObjectMonitor** found) {
 305       result = true;
 306     };
 307     _table->get(current, lookup_f, found_f);
 308     return result;
 309   }
 310 
 311   static void print_on(outputStream* st) {
 312     auto printer = [&] (ObjectMonitor** entry) {
 313        ObjectMonitor* om = *entry;
 314        oop obj = om->object_peek();
 315        st->print("monitor=" PTR_FORMAT ", ", p2i(om));
 316        st->print("object=" PTR_FORMAT, p2i(obj));
 317        assert(obj->mark().hash() == om->hash(), "hash must match");
 318        st->cr();
 319        return true;
 320     };
 321     if (SafepointSynchronize::is_at_safepoint()) {
 322       _table->do_safepoint_scan(printer);
 323     } else {
 324       _table->do_scan(Thread::current(), printer);
 325     }
 326   }
 327 };
 328 
 329 ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
 330 volatile size_t ObjectMonitorTable::_items_count = 0;
 331 size_t ObjectMonitorTable::_table_size = 0;
 332 volatile bool ObjectMonitorTable::_resize = false;
 333 
 334 ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
 335   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 336 
 337   ObjectMonitor* monitor = get_monitor_from_table(current, object);

 387   ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);
 388 
 389   if (inserted) {
 390     log_inflate(current, object, cause);
 391     if (event.should_commit()) {
 392       post_monitor_inflate_event(&event, object, cause);
 393     }
 394 
 395     // The monitor has an anonymous owner so it is safe from async deflation.
 396     ObjectSynchronizer::_in_use_list.add(monitor);
 397   }
 398 
 399   return monitor;
 400 }
 401 
 402 // Add the hashcode to the monitor to match the object and put it in the hashtable.
 403 ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
 404   assert(UseObjectMonitorTable, "must be");
 405   assert(obj == monitor->object(), "must be");
 406 
 407   intptr_t hash = obj->mark().hash();
 408   assert(hash != 0, "must be set when claiming the object monitor");
 409   monitor->set_hash(hash);
 410 
 411   return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
 412 }
 413 
 414 bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
 415   assert(UseObjectMonitorTable, "must be");
 416   assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");
 417 
 418   return ObjectMonitorTable::remove_monitor_entry(current, monitor);
 419 }
 420 
 421 void LightweightSynchronizer::deflate_mark_word(oop obj) {
 422   assert(UseObjectMonitorTable, "must be");
 423 
 424   markWord mark = obj->mark_acquire();
 425   assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");
 426 
 427   while (mark.has_monitor()) {

1192 
1193   if (mark.has_monitor()) {
1194     ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) :
1195                                                            ObjectSynchronizer::read_monitor(mark);
1196 
1197     if (monitor == nullptr) {
1198       // Take the slow-path on a cache miss.
1199       return false;
1200     }
1201 
1202     if (monitor->try_enter(current)) {
1203       // ObjectMonitor enter successful.
1204       cache_setter.set_monitor(monitor);
1205       return true;
1206     }
1207   }
1208 
1209   // Slow-path.
1210   return false;
1211 }
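
A note on the monitor_put_get() contract shown above: ConcurrentHashTable::insert_get() either installs the caller's monitor under the object's hash or, when another thread has already installed a monitor for the same object, runs found_f on the existing entry, so the function returns whichever monitor ends up in the table. Below is a minimal standalone model of that insert-or-get semantics, not HotSpot code: a mutex-guarded std::unordered_map stands in for the lock-free ConcurrentHashTable, and Monitor/object_id are hypothetical stand-ins for ObjectMonitor and the object's identity hash.

#include <cassert>
#include <cstdint>
#include <mutex>
#include <unordered_map>

struct Monitor { uintptr_t object_id; };   // hypothetical stand-in for ObjectMonitor

class MonitorMap {
  std::mutex _lock;                               // the real table is lock-free; a mutex keeps the model short
  std::unordered_map<uintptr_t, Monitor*> _map;   // object identity hash -> monitor

 public:
  // Insert-or-get: returns the caller's monitor if it was installed,
  // otherwise the monitor that some other thread installed first.
  Monitor* put_get(uintptr_t object_id, Monitor* candidate) {
    std::lock_guard<std::mutex> g(_lock);
    auto result = _map.try_emplace(object_id, candidate);
    return result.first->second;   // == candidate iff this call won the insert
  }

  bool remove(uintptr_t object_id) {
    std::lock_guard<std::mutex> g(_lock);
    return _map.erase(object_id) != 0;
  }
};

int main() {
  MonitorMap table;
  Monitor a{1}, b{1};
  Monitor* first  = table.put_get(1, &a);   // installs a
  Monitor* second = table.put_get(1, &b);   // loses the race, gets a back
  assert(first == &a && second == &a);
  return table.remove(1) ? 0 : 1;
}

In the real code, whether the caller's own monitor ended up in the table is what the inserted out-parameter of get_or_insert_monitor_from_table() reports; the sketch only illustrates that return-value contract.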

src/hotspot/share/runtime/lightweightSynchronizer.cpp

  31 #include "oops/oop.inline.hpp"
  32 #include "runtime/atomic.hpp"
  33 #include "runtime/basicLock.inline.hpp"
  34 #include "runtime/globals_extension.hpp"
  35 #include "runtime/interfaceSupport.inline.hpp"
  36 #include "runtime/javaThread.inline.hpp"
  37 #include "runtime/lightweightSynchronizer.hpp"
  38 #include "runtime/lockStack.inline.hpp"
  39 #include "runtime/mutexLocker.hpp"
  40 #include "runtime/objectMonitor.inline.hpp"
  41 #include "runtime/os.hpp"
  42 #include "runtime/safepointMechanism.inline.hpp"
  43 #include "runtime/safepointVerifiers.hpp"
  44 #include "runtime/synchronizer.inline.hpp"
  45 #include "runtime/timerTrace.hpp"
  46 #include "runtime/trimNativeHeap.hpp"
  47 #include "utilities/concurrentHashTable.inline.hpp"
  48 #include "utilities/concurrentHashTableTasks.inline.hpp"
  49 #include "utilities/globalDefinitions.hpp"
  50 
  51 static uintx objhash(oop obj) {
  52   if (UseCompactObjectHeaders) {
  53     uintx hash = LightweightSynchronizer::get_hash(obj->mark(), obj);
  54     assert(hash != 0, "should have a hash");
  55     return hash;
  56   } else {
  57     uintx hash = obj->mark().hash();
  58     assert(hash != 0, "should have a hash");
  59     return hash;
  60   }
  61 }
  62 
  63 // ConcurrentHashTable storing links from objects to ObjectMonitors
  64 class ObjectMonitorTable : AllStatic {
  65   struct Config {
  66     using Value = ObjectMonitor*;
  67     static uintx get_hash(Value const& value, bool* is_dead) {
  68       return (uintx)value->hash();
  69     }
  70     static void* allocate_node(void* context, size_t size, Value const& value) {
  71       ObjectMonitorTable::inc_items_count();
  72       return AllocateHeap(size, mtObjectMonitor);
  73     };
  74     static void free_node(void* context, void* memory, Value const& value) {
  75       ObjectMonitorTable::dec_items_count();
  76       FreeHeap(memory);
  77     }
  78   };
  79   using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;
  80 
  81   static ConcurrentTable* _table;
  82   static volatile size_t _items_count;
  83   static size_t _table_size;
  84   static volatile bool _resize;
  85 
  86   class Lookup : public StackObj {
  87     oop _obj;
  88 
  89    public:
  90     explicit Lookup(oop obj) : _obj(obj) {}
  91 
  92     uintx get_hash() const {
  93       return objhash(_obj);
  94     }
  95 
  96     bool equals(ObjectMonitor** value) {
  97       assert(*value != nullptr, "must be");
  98       return (*value)->object_refers_to(_obj);
  99     }
 100 
 101     bool is_dead(ObjectMonitor** value) {
 102       assert(*value != nullptr, "must be");
 103       return false;
 104     }
 105   };
 106 
 107   class LookupMonitor : public StackObj {
 108     ObjectMonitor* _monitor;
 109 
 110    public:
 111     explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}
 112 
 113     uintx get_hash() const {

 275       success = grow(current);
 276     } else {
 277       if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
 278         lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
 279       }
 280       lt.print("Start cleaning with load factor %f", get_load_factor());
 281       success = clean(current);
 282     }
 283 
 284     Atomic::store(&_resize, false);
 285 
 286     return success;
 287   }
 288 
 289   static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
 290     // Enter the monitor into the concurrent hashtable.
 291     ObjectMonitor* result = monitor;
 292     Lookup lookup_f(obj);
 293     auto found_f = [&](ObjectMonitor** found) {
 294       assert((*found)->object_peek() == obj, "must be");
 295       assert(objhash(obj) == (uintx)(*found)->hash(), "hash must match");
 296       result = *found;
 297     };
 298     bool grow;
 299     _table->insert_get(current, lookup_f, monitor, found_f, &grow);
 300     verify_monitor_get_result(obj, result);
 301     if (grow) {
 302       try_notify_grow();
 303     }
 304     return result;
 305   }
 306 
 307   static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
 308     LookupMonitor lookup_f(monitor);
 309     return _table->remove(current, lookup_f);
 310   }
 311 
 312   static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
 313     LookupMonitor lookup_f(monitor);
 314     bool result = false;
 315     auto found_f = [&](ObjectMonitor** found) {
 316       result = true;
 317     };
 318     _table->get(current, lookup_f, found_f);
 319     return result;
 320   }
 321 
 322   static void print_on(outputStream* st) {
 323     auto printer = [&] (ObjectMonitor** entry) {
 324        ObjectMonitor* om = *entry;
 325        oop obj = om->object_peek();
 326        st->print("monitor=" PTR_FORMAT ", ", p2i(om));
 327        st->print("object=" PTR_FORMAT, p2i(obj));
 328        assert(objhash(obj) == (uintx)om->hash(), "hash must match");
 329        st->cr();
 330        return true;
 331     };
 332     if (SafepointSynchronize::is_at_safepoint()) {
 333       _table->do_safepoint_scan(printer);
 334     } else {
 335       _table->do_scan(Thread::current(), printer);
 336     }
 337   }
 338 };
 339 
 340 ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
 341 volatile size_t ObjectMonitorTable::_items_count = 0;
 342 size_t ObjectMonitorTable::_table_size = 0;
 343 volatile bool ObjectMonitorTable::_resize = false;
 344 
 345 ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
 346   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 347 
 348   ObjectMonitor* monitor = get_monitor_from_table(current, object);

 398   ObjectMonitor* monitor = get_or_insert_monitor_from_table(object, current, &inserted);
 399 
 400   if (inserted) {
 401     log_inflate(current, object, cause);
 402     if (event.should_commit()) {
 403       post_monitor_inflate_event(&event, object, cause);
 404     }
 405 
 406     // The monitor has an anonymous owner so it is safe from async deflation.
 407     ObjectSynchronizer::_in_use_list.add(monitor);
 408   }
 409 
 410   return monitor;
 411 }
 412 
 413 // Add the hashcode to the monitor to match the object and put it in the hashtable.
 414 ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
 415   assert(UseObjectMonitorTable, "must be");
 416   assert(obj == monitor->object(), "must be");
 417 
 418   intptr_t hash = objhash(obj);
 419   assert(hash != 0, "must be set when claiming the object monitor");
 420   monitor->set_hash(hash);
 421 
 422   return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
 423 }
 424 
 425 bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
 426   assert(UseObjectMonitorTable, "must be");
 427   assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");
 428 
 429   return ObjectMonitorTable::remove_monitor_entry(current, monitor);
 430 }
 431 
 432 void LightweightSynchronizer::deflate_mark_word(oop obj) {
 433   assert(UseObjectMonitorTable, "must be");
 434 
 435   markWord mark = obj->mark_acquire();
 436   assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");
 437 
 438   while (mark.has_monitor()) {

1203 
1204   if (mark.has_monitor()) {
1205     ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) :
1206                                                            ObjectSynchronizer::read_monitor(mark);
1207 
1208     if (monitor == nullptr) {
1209       // Take the slow-path on a cache miss.
1210       return false;
1211     }
1212 
1213     if (monitor->try_enter(current)) {
1214       // ObjectMonitor enter successful.
1215       cache_setter.set_monitor(monitor);
1216       return true;
1217     }
1218   }
1219 
1220   // Slow-path.
1221   return false;
1222 }
1223 
1224 uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj, Klass* klass) {
1225   assert(UseCompactObjectHeaders, "Only with compact i-hash");
1226   //assert(mark.is_neutral() | mark.is_fast_locked(), "only from neutral or fast-locked mark: " INTPTR_FORMAT, mark.value());
1227   assert(mark.is_hashed(), "only from hashed or copied object");
1228   if (mark.is_hashed_expanded()) {
1229     return obj->int_field(klass->hash_offset_in_bytes(obj, mark));
1230   } else {
1231     assert(mark.is_hashed_not_expanded(), "must be hashed");
1232     assert(hashCode == 6 || hashCode == 2, "must have idempotent hashCode");
1233     // Already marked as hashed, but not yet copied. Recompute hash and return it.
1234     return ObjectSynchronizer::get_next_hash(nullptr, obj); // recompute hash
1235   }
1236 }
1237 
1238 uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj) {
1239   return get_hash(mark, obj, mark.klass());
1240 }
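
For context on the new helpers above: with UseCompactObjectHeaders the identity hash is no longer read straight out of the mark word, so the patch routes every table lookup through objhash(), which defers to LightweightSynchronizer::get_hash(). That helper reads the hash from the field at klass->hash_offset_in_bytes() once the object is hashed_expanded, and otherwise recomputes it, relying on an idempotent hashCode mode as the assert requires. Below is a minimal standalone sketch of that dispatch, not HotSpot code; ObjModel and its fields are hypothetical stand-ins for the state the real code reads from markWord and Klass.

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the state the real code reads from markWord and Klass.
struct ObjModel {
  bool     compact_headers;   // UseCompactObjectHeaders
  bool     hashed_expanded;   // object has been copied with room for the hash field
  uint32_t mark_hash;         // legacy layout: identity hash stored in the mark word
  uint32_t field_hash;        // compact headers: identity hash stored in a trailing field
  uint32_t recomputed_hash;   // what an idempotent hashCode mode would recompute
};

// Mirrors the intent of objhash()/get_hash(): one entry point that always yields
// the same non-zero identity hash for an object, wherever it is physically stored.
static uint32_t obj_hash(const ObjModel& o) {
  uint32_t h;
  if (!o.compact_headers) {
    h = o.mark_hash;          // legacy: read the hash out of the mark word
  } else if (o.hashed_expanded) {
    h = o.field_hash;         // read the installed hash field directly
  } else {
    h = o.recomputed_hash;    // hashed but not yet expanded: recompute (must be idempotent)
  }
  assert(h != 0 && "monitor table lookups require an installed hash");
  return h;
}

int main() {
  ObjModel legacy  = {false, false, 0x1234, 0, 0};
  ObjModel compact = {true,  true,  0, 0x5678, 0};
  return (obj_hash(legacy) == 0x1234 && obj_hash(compact) == 0x5678) ? 0 : 1;
}

The point of funnelling every lookup through one helper is that Lookup::get_hash(), monitor_put_get() and print_on() all agree on the same hash for a given object, which is what the new "hash must match" asserts in the table code verify.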