< prev index next >

src/hotspot/share/runtime/vmOperations.cpp

Print this page
*** 41,16 ***
--- 41,18 ---
  #include "runtime/deoptimization.hpp"
  #include "runtime/frame.inline.hpp"
  #include "runtime/interfaceSupport.inline.hpp"
  #include "runtime/javaThread.inline.hpp"
  #include "runtime/jniHandles.hpp"
+ #include "runtime/objectMonitor.inline.hpp"
  #include "runtime/stackFrameStream.inline.hpp"
  #include "runtime/synchronizer.hpp"
  #include "runtime/threads.hpp"
  #include "runtime/threadSMR.inline.hpp"
  #include "runtime/vmOperations.hpp"
  #include "services/threadService.hpp"
+ #include "utilities/ticks.hpp"
  
  #define VM_OP_NAME_INITIALIZE(name) #name,
  
  const char* VM_Operation::_names[VM_Operation::VMOp_Terminating] = \
    { VM_OPS_DO(VM_OP_NAME_INITIALIZE) };

*** 263,10 ***
--- 265,115 ---
      // Release Heap_lock
      Heap_lock->unlock();
    }
  }
  
+ // Hash table of void* to a list of ObjectMonitor* owned by the JavaThread.
+ // The JavaThread's owner key is either a JavaThread* or a stack lock
+ // address in the JavaThread so we use "void*".
+ //
+ class ObjectMonitorsDump : public MonitorClosure, public ObjectMonitorsView {
+  private:
+   static unsigned int ptr_hash(void* const& s1) {
+     // 2654435761 = 2^32 * Phi (golden ratio) -- Knuth's multiplicative hash
+     return (unsigned int)(((uint32_t)(uintptr_t)s1) * 2654435761u);
+   }
+ 
+   // Per-owner list of the ObjectMonitors that owner holds.
+   class ObjectMonitorLinkedList :
+     public LinkedListImpl<ObjectMonitor*,
+                           AnyObj::C_HEAP, mtThread,
+                           AllocFailStrategy::RETURN_NULL> {};
+ 
+   // ResourceHashtable SIZE is specified at compile time so we
+   // use 1031 which is the first prime after 1024.
+   typedef ResourceHashtable<void*, ObjectMonitorLinkedList*, 1031, AnyObj::C_HEAP, mtThread,
+                             &ObjectMonitorsDump::ptr_hash> PtrTable;
+   PtrTable* _ptrs;      // owner key -> list of owned ObjectMonitors
+   size_t _key_count;    // number of distinct owner keys in _ptrs
+   size_t _om_count;     // total number of ObjectMonitors collected
+ 
+   // Adds a new owner-key -> list mapping to the hash table.
+   void add_list(void* key, ObjectMonitorLinkedList* list) {
+     _ptrs->put(key, list);
+     _key_count++;
+   }
+ 
+   // Returns the list for the given owner key, or nullptr if not present.
+   ObjectMonitorLinkedList* get_list(void* key) {
+     ObjectMonitorLinkedList** listpp = _ptrs->get(key);
+     return (listpp == nullptr) ? nullptr : *listpp;
+   }
+ 
+   // Records an owned monitor under its owner key, creating the
+   // owner's list on first use.
+   void add(ObjectMonitor* monitor) {
+     void* key = monitor->owner();
+ 
+     ObjectMonitorLinkedList* list = get_list(key);
+     if (list == nullptr) {
+       // Create new list and add it to the hash table:
+       list = new (mtThread) ObjectMonitorLinkedList;
+       add_list(key, list);
+     }
+ 
+     assert(list->find(monitor) == nullptr, "Should not contain duplicates");
+     list->add(monitor);  // Add the ObjectMonitor to the list.
+     _om_count++;
+   }
+ 
+  public:
+   // ResourceHashtable is passed to various functions and populated in
+   // different places so we allocate it using C_HEAP to make it immune
+   // from any ResourceMarks that happen to be in the code paths.
+   ObjectMonitorsDump() : _ptrs(new (mtThread) PtrTable), _key_count(0), _om_count(0) {}
+ 
+   ~ObjectMonitorsDump() {
+     class CleanupObjectMonitorsDump: StackObj {
+      public:
+       bool do_entry(void*& key, ObjectMonitorLinkedList*& list) {
+         list->clear();  // clear the LinkListNodes
+         delete list;    // then delete the LinkedList
+         return true;
+       }
+     } cleanup;
+ 
+     _ptrs->unlink(&cleanup);  // cleanup the LinkedLists
+     delete _ptrs;             // then delete the hash table
+   }
+ 
+   // Implements MonitorClosure used to collect all owned monitors in the system
+   void do_monitor(ObjectMonitor* monitor) override {
+     assert(monitor->has_owner(), "Expects only owned monitors");
+ 
+     if (monitor->is_owner_anonymous()) {
+       // There's no need to collect anonymous owned monitors
+       // because the caller of this code is only interested
+       // in JNI owned monitors.
+       return;
+     }
+ 
+     if (monitor->object_peek() == nullptr) {
+       // JNI code doesn't necessarily keep the monitor object
+       // alive. Filter out monitors with dead objects.
+       return;
+     }
+ 
+     add(monitor);
+   }
+ 
+   // Implements the ObjectMonitorsView interface: applies 'closure' to
+   // every collected monitor owned by 'thread' (no-op if none recorded).
+   void visit(MonitorClosure* closure, JavaThread* thread) override {
+     ObjectMonitorLinkedList* list = get_list(thread);
+     LinkedListIterator<ObjectMonitor*> iter(list != nullptr ? list->head() : nullptr);
+     while (!iter.is_empty()) {
+       ObjectMonitor* monitor = *iter.next();
+       closure->do_monitor(monitor);
+     }
+   }
+ 
+   size_t key_count() const { return _key_count; }
+   size_t om_count() const { return _om_count; }
+ };
+ 
  void VM_ThreadDump::doit() {
    ResourceMark rm;
  
    // Set the hazard ptr in the originating thread to protect the
    // current list of threads. This VM operation needs the current list

*** 277,20 ***
    ConcurrentLocksDump concurrent_locks(true);
    if (_with_locked_synchronizers) {
      concurrent_locks.dump_at_safepoint();
    }
  
!   ObjectMonitorsHashtable table;
-   ObjectMonitorsHashtable* tablep = nullptr;
    if (_with_locked_monitors) {
!     // The caller wants locked monitor information and that's expensive to gather
!     // when there are a lot of inflated monitors. So we deflate idle monitors and
!     // gather information about owned monitors at the same time.
!     tablep = &table;
!     while (ObjectSynchronizer::deflate_idle_monitors(tablep) > 0) {
!       ; /* empty */
!     }
    }
  
    if (_num_threads == 0) {
      // Snapshot all live threads
  
--- 384,24 ---
    ConcurrentLocksDump concurrent_locks(true);
    if (_with_locked_synchronizers) {
      concurrent_locks.dump_at_safepoint();
    }
  
!   ObjectMonitorsDump object_monitors;
    if (_with_locked_monitors) {
!     // Gather information about owned monitors.
!     ObjectSynchronizer::owned_monitors_iterate(&object_monitors);
! 
!     // If there are many object monitors in the system then the above iteration
!     // can start to take time. Be friendly to following thread dumps by telling
!     // the MonitorDeflationThread to deflate monitors.
!     //
+     // This is trying to be somewhat backwards compatible with the previous
+     // implementation, which performed monitor deflation right here. We might
+     // want to reconsider the need to trigger monitor deflation from the thread
+     // dumping and instead maybe tweak the deflation heuristics.
+     ObjectSynchronizer::request_deflate_idle_monitors();
    }
  
    if (_num_threads == 0) {
      // Snapshot all live threads
  

*** 303,11 ***
        }
        ThreadConcurrentLocks* tcl = nullptr;
        if (_with_locked_synchronizers) {
          tcl = concurrent_locks.thread_concurrent_locks(jt);
        }
!       snapshot_thread(jt, tcl, tablep);
      }
    } else {
      // Snapshot threads in the given _threads array
      // A dummy snapshot is created if a thread doesn't exist
  
--- 414,11 ---
        }
        ThreadConcurrentLocks* tcl = nullptr;
        if (_with_locked_synchronizers) {
          tcl = concurrent_locks.thread_concurrent_locks(jt);
        }
!       snapshot_thread(jt, tcl, &object_monitors);
      }
    } else {
      // Snapshot threads in the given _threads array
      // A dummy snapshot is created if a thread doesn't exist
  

*** 338,19 ***
        }
        ThreadConcurrentLocks* tcl = nullptr;
        if (_with_locked_synchronizers) {
          tcl = concurrent_locks.thread_concurrent_locks(jt);
        }
!       snapshot_thread(jt, tcl, tablep);
      }
    }
  }
  
  void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
!                                     ObjectMonitorsHashtable* table) {
    ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
!   snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, table, false);
    snapshot->set_concurrent_locks(tcl);
  }
  
  volatile bool VM_Exit::_vm_exited = false;
  Thread * volatile VM_Exit::_shutdown_thread = nullptr;
--- 449,19 ---
        }
        ThreadConcurrentLocks* tcl = nullptr;
        if (_with_locked_synchronizers) {
          tcl = concurrent_locks.thread_concurrent_locks(jt);
        }
!       snapshot_thread(jt, tcl, &object_monitors);
      }
    }
  }
  
+ // Adds a ThreadSnapshot for java_thread to _result: dumps its stack at
+ // the safepoint (bounded by _max_depth; 'monitors' supplies locked-monitor
+ // info when _with_locked_monitors is set) and attaches its concurrent
+ // locks (tcl; callers pass nullptr when not requested).
  void VM_ThreadDump::snapshot_thread(JavaThread* java_thread, ThreadConcurrentLocks* tcl,
!                                     ObjectMonitorsView* monitors) {
    ThreadSnapshot* snapshot = _result->add_thread_snapshot(java_thread);
!   snapshot->dump_stack_at_safepoint(_max_depth, _with_locked_monitors, monitors, false);
    snapshot->set_concurrent_locks(tcl);
  }
  
  volatile bool VM_Exit::_vm_exited = false;
  Thread * volatile VM_Exit::_shutdown_thread = nullptr;
< prev index next >