< prev index next >

src/hotspot/share/runtime/lightweightSynchronizer.cpp

Print this page

  32 #include "runtime/atomic.hpp"
  33 #include "runtime/basicLock.inline.hpp"
  34 #include "runtime/globals_extension.hpp"
  35 #include "runtime/interfaceSupport.inline.hpp"
  36 #include "runtime/javaThread.inline.hpp"
  37 #include "runtime/lightweightSynchronizer.hpp"
  38 #include "runtime/lockStack.inline.hpp"
  39 #include "runtime/mutexLocker.hpp"
  40 #include "runtime/objectMonitor.inline.hpp"
  41 #include "runtime/os.hpp"
  42 #include "runtime/perfData.inline.hpp"
  43 #include "runtime/safepointMechanism.inline.hpp"
  44 #include "runtime/safepointVerifiers.hpp"
  45 #include "runtime/synchronizer.inline.hpp"
  46 #include "runtime/timerTrace.hpp"
  47 #include "runtime/trimNativeHeap.hpp"
  48 #include "utilities/concurrentHashTable.inline.hpp"
  49 #include "utilities/concurrentHashTableTasks.inline.hpp"
  50 #include "utilities/globalDefinitions.hpp"
  51 












  52 // ConcurrentHashTable storing links from objects to ObjectMonitors
  53 class ObjectMonitorTable : AllStatic {
  // Traits type plugged into ConcurrentHashTable: derives an entry's hash
  // from the ObjectMonitor itself and tracks node allocations.
  struct Config {
    using Value = ObjectMonitor*;
    // The table hash of an entry is the hash stored in the monitor.
    static uintx get_hash(Value const& value, bool* is_dead) {
      return (uintx)value->hash();
    }
    // Node allocation also bumps the item count so the resize heuristics
    // can compute a load factor.
    static void* allocate_node(void* context, size_t size, Value const& value) {
      ObjectMonitorTable::inc_items_count();
      return AllocateHeap(size, mtObjectMonitor);
    };
    static void free_node(void* context, void* memory, Value const& value) {
      ObjectMonitorTable::dec_items_count();
      FreeHeap(memory);
    }
  };
  68   using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;
  69 
  70   static ConcurrentTable* _table;
  71   static volatile size_t _items_count;
  72   static size_t _table_size;
  73   static volatile bool _resize;
  74 
  // Lookup functor used to find the ObjectMonitor mapped to a given object.
  class Lookup : public StackObj {
    oop _obj;

   public:
    explicit Lookup(oop obj) : _obj(obj) {}

    // The object's mark-word hash; must already be installed by the caller.
    uintx get_hash() const {
      uintx hash = _obj->mark().hash();
      assert(hash != 0, "should have a hash");
      return hash;
    }

    // An entry matches if the monitor still refers to this object.
    bool equals(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    // Object lookups never treat an entry as dead.
    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return false;
    }
  };
  97 
  98   class LookupMonitor : public StackObj {
  99     ObjectMonitor* _monitor;
 100 
 101    public:
 102     explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}
 103 
 104     uintx get_hash() const {

 266       success = grow(current);
 267     } else {
 268       if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
 269         lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
 270       }
 271       lt.print("Start cleaning with load factor %f", get_load_factor());
 272       success = clean(current);
 273     }
 274 
 275     Atomic::store(&_resize, false);
 276 
 277     return success;
 278   }
 279 
 280   static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
 281     // Enter the monitor into the concurrent hashtable.
 282     ObjectMonitor* result = monitor;
 283     Lookup lookup_f(obj);
 284     auto found_f = [&](ObjectMonitor** found) {
 285       assert((*found)->object_peek() == obj, "must be");

 286       result = *found;
 287     };
 288     bool grow;
 289     _table->insert_get(current, lookup_f, monitor, found_f, &grow);
 290     verify_monitor_get_result(obj, result);
 291     if (grow) {
 292       try_notify_grow();
 293     }
 294     return result;
 295   }
 296 
 297   static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
 298     LookupMonitor lookup_f(monitor);
 299     return _table->remove(current, lookup_f);
 300   }
 301 
 302   static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
 303     LookupMonitor lookup_f(monitor);
 304     bool result = false;
 305     auto found_f = [&](ObjectMonitor** found) {
 306       result = true;
 307     };
 308     _table->get(current, lookup_f, found_f);
 309     return result;
 310   }
 311 
  // Print every monitor/object pair in the table. Uses the safepoint scan
  // variant when called at a safepoint, a concurrent scan otherwise.
  static void print_on(outputStream* st) {
    auto printer = [&] (ObjectMonitor** entry) {
       ObjectMonitor* om = *entry;
       oop obj = om->object_peek();
       st->print("monitor=" PTR_FORMAT ", ", p2i(om));
       st->print("object=" PTR_FORMAT, p2i(obj));
       // NOTE(review): object_peek() presumably can return null for a cleared
       // object (see remove_monitor's "cleared objects are removed by is_dead");
       // confirm dead entries cannot be observed here, otherwise this assert
       // dereferences null — TODO confirm.
       assert(obj->mark().hash() == om->hash(), "hash must match");
       st->cr();
       return true;  // keep scanning all entries
    };
    if (SafepointSynchronize::is_at_safepoint()) {
      _table->do_safepoint_scan(printer);
    } else {
      _table->do_scan(Thread::current(), printer);
    }
  }
 328 };
 329 
// Static storage for ObjectMonitorTable state: the hashtable itself, the
// entry count, the current table size, and the pending-resize flag.
ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
volatile size_t ObjectMonitorTable::_items_count = 0;
size_t ObjectMonitorTable::_table_size = 0;
volatile bool ObjectMonitorTable::_resize = false;
 334 
 335 ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
 336   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 337 
 338   ObjectMonitor* monitor = get_monitor_from_table(current, object);

 387     // Hopefully the performance counters are allocated on distinct
 388     // cache lines to avoid false sharing on MP systems ...
 389     OM_PERFDATA_OP(Inflations, inc());
 390     log_inflate(current, object, cause);
 391     if (event.should_commit()) {
 392       post_monitor_inflate_event(&event, object, cause);
 393     }
 394 
 395     // The monitor has an anonymous owner so it is safe from async deflation.
 396     ObjectSynchronizer::_in_use_list.add(monitor);
 397   }
 398 
 399   return monitor;
 400 }
 401 
// Add the hashcode to the monitor to match the object and put it in the hashtable.
ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(obj == monitor->object(), "must be");

  // The object's identity hash doubles as the table hash (see
  // ObjectMonitorTable's Config::get_hash), so it must be installed on the
  // monitor before insertion.
  intptr_t hash = obj->mark().hash();
  assert(hash != 0, "must be set when claiming the object monitor");
  monitor->set_hash(hash);

  return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
}
 413 
// Remove the monitor's hashtable entry; 'obj' is only used for verification.
bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");

  return ObjectMonitorTable::remove_monitor_entry(current, monitor);
}
 420 
 421 void LightweightSynchronizer::deflate_mark_word(oop obj) {
 422   assert(UseObjectMonitorTable, "must be");
 423 
 424   markWord mark = obj->mark_acquire();
 425   assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");
 426 
 427   while (mark.has_monitor()) {

1198 
1199   if (mark.has_monitor()) {
1200     ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) :
1201                                                            ObjectSynchronizer::read_monitor(mark);
1202 
1203     if (monitor == nullptr) {
1204       // Take the slow-path on a cache miss.
1205       return false;
1206     }
1207 
1208     if (monitor->try_enter(current)) {
1209       // ObjectMonitor enter successful.
1210       cache_setter.set_monitor(monitor);
1211       return true;
1212     }
1213   }
1214 
1215   // Slow-path.
1216   return false;
1217 }



















  32 #include "runtime/atomic.hpp"
  33 #include "runtime/basicLock.inline.hpp"
  34 #include "runtime/globals_extension.hpp"
  35 #include "runtime/interfaceSupport.inline.hpp"
  36 #include "runtime/javaThread.inline.hpp"
  37 #include "runtime/lightweightSynchronizer.hpp"
  38 #include "runtime/lockStack.inline.hpp"
  39 #include "runtime/mutexLocker.hpp"
  40 #include "runtime/objectMonitor.inline.hpp"
  41 #include "runtime/os.hpp"
  42 #include "runtime/perfData.inline.hpp"
  43 #include "runtime/safepointMechanism.inline.hpp"
  44 #include "runtime/safepointVerifiers.hpp"
  45 #include "runtime/synchronizer.inline.hpp"
  46 #include "runtime/timerTrace.hpp"
  47 #include "runtime/trimNativeHeap.hpp"
  48 #include "utilities/concurrentHashTable.inline.hpp"
  49 #include "utilities/concurrentHashTableTasks.inline.hpp"
  50 #include "utilities/globalDefinitions.hpp"
  51 
  52 static uintx objhash(oop obj) {
  53   if (UseCompactObjectHeaders) {
  54     uintx hash = LightweightSynchronizer::get_hash(obj->mark(), obj);
  55     assert(hash != 0, "should have a hash");
  56     return hash;
  57   } else {
  58     uintx hash = obj->mark().hash();
  59     assert(hash != 0, "should have a hash");
  60     return hash;
  61   }
  62 }
  63 
  64 // ConcurrentHashTable storing links from objects to ObjectMonitors
  65 class ObjectMonitorTable : AllStatic {
  66   struct Config {
  67     using Value = ObjectMonitor*;
  68     static uintx get_hash(Value const& value, bool* is_dead) {
  69       return (uintx)value->hash();
  70     }
  71     static void* allocate_node(void* context, size_t size, Value const& value) {
  72       ObjectMonitorTable::inc_items_count();
  73       return AllocateHeap(size, mtObjectMonitor);
  74     };
  75     static void free_node(void* context, void* memory, Value const& value) {
  76       ObjectMonitorTable::dec_items_count();
  77       FreeHeap(memory);
  78     }
  79   };
  80   using ConcurrentTable = ConcurrentHashTable<Config, mtObjectMonitor>;
  81 
  82   static ConcurrentTable* _table;
  83   static volatile size_t _items_count;
  84   static size_t _table_size;
  85   static volatile bool _resize;
  86 
  // Lookup functor used to find the ObjectMonitor mapped to a given object.
  class Lookup : public StackObj {
    oop _obj;

   public:
    explicit Lookup(oop obj) : _obj(obj) {}

    // The object's identity hash (objhash asserts it is already installed).
    uintx get_hash() const {
      return objhash(_obj);
    }

    // An entry matches if the monitor still refers to this object.
    bool equals(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return (*value)->object_refers_to(_obj);
    }

    // Object lookups never treat an entry as dead.
    bool is_dead(ObjectMonitor** value) {
      assert(*value != nullptr, "must be");
      return false;
    }
  };
 107 
 108   class LookupMonitor : public StackObj {
 109     ObjectMonitor* _monitor;
 110 
 111    public:
 112     explicit LookupMonitor(ObjectMonitor* monitor) : _monitor(monitor) {}
 113 
 114     uintx get_hash() const {

 276       success = grow(current);
 277     } else {
 278       if (!_table->is_max_size_reached() && Atomic::load(&_resize)) {
 279         lt.print("WARNING: Getting resize hints with load factor %f", get_load_factor());
 280       }
 281       lt.print("Start cleaning with load factor %f", get_load_factor());
 282       success = clean(current);
 283     }
 284 
 285     Atomic::store(&_resize, false);
 286 
 287     return success;
 288   }
 289 
 290   static ObjectMonitor* monitor_put_get(Thread* current, ObjectMonitor* monitor, oop obj) {
 291     // Enter the monitor into the concurrent hashtable.
 292     ObjectMonitor* result = monitor;
 293     Lookup lookup_f(obj);
 294     auto found_f = [&](ObjectMonitor** found) {
 295       assert((*found)->object_peek() == obj, "must be");
 296       assert(objhash(obj) == (uintx)(*found)->hash(), "hash must match");
 297       result = *found;
 298     };
 299     bool grow;
 300     _table->insert_get(current, lookup_f, monitor, found_f, &grow);
 301     verify_monitor_get_result(obj, result);
 302     if (grow) {
 303       try_notify_grow();
 304     }
 305     return result;
 306   }
 307 
 308   static bool remove_monitor_entry(Thread* current, ObjectMonitor* monitor) {
 309     LookupMonitor lookup_f(monitor);
 310     return _table->remove(current, lookup_f);
 311   }
 312 
 313   static bool contains_monitor(Thread* current, ObjectMonitor* monitor) {
 314     LookupMonitor lookup_f(monitor);
 315     bool result = false;
 316     auto found_f = [&](ObjectMonitor** found) {
 317       result = true;
 318     };
 319     _table->get(current, lookup_f, found_f);
 320     return result;
 321   }
 322 
  // Print every monitor/object pair in the table. Uses the safepoint scan
  // variant when called at a safepoint, a concurrent scan otherwise.
  static void print_on(outputStream* st) {
    auto printer = [&] (ObjectMonitor** entry) {
       ObjectMonitor* om = *entry;
       oop obj = om->object_peek();
       st->print("monitor=" PTR_FORMAT ", ", p2i(om));
       st->print("object=" PTR_FORMAT, p2i(obj));
       // NOTE(review): object_peek() presumably can return null for a cleared
       // object (see remove_monitor's "cleared objects are removed by is_dead");
       // confirm dead entries cannot be observed here, otherwise objhash(obj)
       // dereferences null — TODO confirm.
       assert(objhash(obj) == (uintx)om->hash(), "hash must match");
       st->cr();
       return true;  // keep scanning all entries
    };
    if (SafepointSynchronize::is_at_safepoint()) {
      _table->do_safepoint_scan(printer);
    } else {
      _table->do_scan(Thread::current(), printer);
    }
  }
 339 };
 340 
// Static storage for ObjectMonitorTable state: the hashtable itself, the
// entry count, the current table size, and the pending-resize flag.
ObjectMonitorTable::ConcurrentTable* ObjectMonitorTable::_table = nullptr;
volatile size_t ObjectMonitorTable::_items_count = 0;
size_t ObjectMonitorTable::_table_size = 0;
volatile bool ObjectMonitorTable::_resize = false;
 345 
 346 ObjectMonitor* LightweightSynchronizer::get_or_insert_monitor_from_table(oop object, JavaThread* current, bool* inserted) {
 347   assert(LockingMode == LM_LIGHTWEIGHT, "must be");
 348 
 349   ObjectMonitor* monitor = get_monitor_from_table(current, object);

 398     // Hopefully the performance counters are allocated on distinct
 399     // cache lines to avoid false sharing on MP systems ...
 400     OM_PERFDATA_OP(Inflations, inc());
 401     log_inflate(current, object, cause);
 402     if (event.should_commit()) {
 403       post_monitor_inflate_event(&event, object, cause);
 404     }
 405 
 406     // The monitor has an anonymous owner so it is safe from async deflation.
 407     ObjectSynchronizer::_in_use_list.add(monitor);
 408   }
 409 
 410   return monitor;
 411 }
 412 
// Add the hashcode to the monitor to match the object and put it in the hashtable.
ObjectMonitor* LightweightSynchronizer::add_monitor(JavaThread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(obj == monitor->object(), "must be");

  // The object's identity hash doubles as the table hash (see
  // ObjectMonitorTable's Config::get_hash), so it must be installed on the
  // monitor before insertion.
  intptr_t hash = objhash(obj);
  assert(hash != 0, "must be set when claiming the object monitor");
  monitor->set_hash(hash);

  return ObjectMonitorTable::monitor_put_get(current, monitor, obj);
}
 424 
// Remove the monitor's hashtable entry; 'obj' is only used for verification.
bool LightweightSynchronizer::remove_monitor(Thread* current, ObjectMonitor* monitor, oop obj) {
  assert(UseObjectMonitorTable, "must be");
  assert(monitor->object_peek() == obj, "must be, cleared objects are removed by is_dead");

  return ObjectMonitorTable::remove_monitor_entry(current, monitor);
}
 431 
 432 void LightweightSynchronizer::deflate_mark_word(oop obj) {
 433   assert(UseObjectMonitorTable, "must be");
 434 
 435   markWord mark = obj->mark_acquire();
 436   assert(!mark.has_no_hash(), "obj with inflated monitor must have had a hash");
 437 
 438   while (mark.has_monitor()) {

1209 
1210   if (mark.has_monitor()) {
1211     ObjectMonitor* const monitor = UseObjectMonitorTable ? current->om_get_from_monitor_cache(obj) :
1212                                                            ObjectSynchronizer::read_monitor(mark);
1213 
1214     if (monitor == nullptr) {
1215       // Take the slow-path on a cache miss.
1216       return false;
1217     }
1218 
1219     if (monitor->try_enter(current)) {
1220       // ObjectMonitor enter successful.
1221       cache_setter.set_monitor(monitor);
1222       return true;
1223     }
1224   }
1225 
1226   // Slow-path.
1227   return false;
1228 }
1229 
1230 uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj, Klass* klass) {
1231   assert(UseCompactObjectHeaders, "Only with compact i-hash");
1232   //assert(mark.is_neutral() | mark.is_fast_locked(), "only from neutral or fast-locked mark: " INTPTR_FORMAT, mark.value());
1233   assert(mark.is_hashed(), "only from hashed or copied object");
1234   if (mark.is_hashed_expanded()) {
1235     return obj->int_field(klass->hash_offset_in_bytes(obj));
1236   } else {
1237     assert(mark.is_hashed_not_expanded(), "must be hashed");
1238     assert(hashCode == 6 || hashCode == 2, "must have idempotent hashCode");
1239     // Already marked as hashed, but not yet copied. Recompute hash and return it.
1240     return ObjectSynchronizer::get_next_hash(nullptr, obj); // recompute hash
1241   }
1242 }
1243 
// Convenience overload: derives the Klass from the mark word before
// delegating to the three-argument form.
uint32_t LightweightSynchronizer::get_hash(markWord mark, oop obj) {
  return get_hash(mark, obj, mark.klass());
}
< prev index next >