
src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp

*** 142,25 ***
  void ObjectSampler::release() {
    OrderAccess::fence();
    _lock = 0;
  }
  
! static traceid get_thread_id(JavaThread* thread) {
    assert(thread != NULL, "invariant");
    if (thread->threadObj() == NULL) {
      return 0;
    }
    const JfrThreadLocal* const tl = thread->jfr_thread_local();
    assert(tl != NULL, "invariant");
    if (tl->is_excluded()) {
      return 0;
    }
    if (!tl->has_thread_blob()) {
!     JfrCheckpointManager::create_thread_blob(thread);
    }
!   assert(tl->has_thread_blob(), "invariant");
-   return tl->thread_id();
  }
  
  class RecordStackTrace {
   private:
    JavaThread* _jt;
--- 142,39 ---
  void ObjectSampler::release() {
    OrderAccess::fence();
    _lock = 0;
  }
  
! static traceid get_thread_id(JavaThread* thread, bool* virtual_thread) {
    assert(thread != NULL, "invariant");
+   assert(virtual_thread != NULL, "invariant");
    if (thread->threadObj() == NULL) {
      return 0;
    }
    const JfrThreadLocal* const tl = thread->jfr_thread_local();
    assert(tl != NULL, "invariant");
    if (tl->is_excluded()) {
      return 0;
    }
+   return JfrThreadLocal::thread_id(thread, virtual_thread);
+ }
+ 
+ static JfrBlobHandle get_thread_blob(JavaThread* thread, traceid tid, bool virtual_thread) {
+   assert(thread != NULL, "invariant");
+   JfrThreadLocal* const tl = thread->jfr_thread_local();
+   assert(tl != NULL, "invariant");
+   assert(!tl->is_excluded(), "invariant");
+   if (virtual_thread) {
+     // TODO: blob cache for virtual threads
+     return JfrCheckpointManager::create_thread_blob(thread, tid, thread->vthread());
+   }
    if (!tl->has_thread_blob()) {
!     // for regular threads, the blob is cached in the thread local data structure
+     tl->set_thread_blob(JfrCheckpointManager::create_thread_blob(thread, tid));
+     assert(tl->has_thread_blob(), "invariant");
    }
!   return tl->thread_blob();
  }
  
  class RecordStackTrace {
   private:
    JavaThread* _jt;

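Note: before this change, get_thread_id() both returned the id and, as a side effect, ensured the thread blob was cached in the thread locals; the rewrite splits those concerns, since a virtual thread's traceid is per vthread while the JfrThreadLocal blob cache belongs to the carrier thread. A condensed caller sketch using only the two functions above (the wrapper itself is hypothetical, not part of the patch, and it assumes JfrBlobHandle is default-constructible and assignable, as its caching use here suggests):

  // Resolve id and blob for a prospective sample; returns false if the
  // thread has no java.lang.Thread object yet or is excluded from JFR.
  static bool resolve_thread_info(JavaThread* thread, traceid* tid, JfrBlobHandle* bh) {
    bool virtual_thread = false;
    *tid = get_thread_id(thread, &virtual_thread);
    if (*tid == 0) {
      return false;
    }
    *bh = get_thread_blob(thread, *tid, virtual_thread);
    return bh->valid();
  }
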
*** 180,29 ***
  };
  
  void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
    assert(thread != NULL, "invariant");
    assert(is_created(), "invariant");
!   const traceid thread_id = get_thread_id(thread);
    if (thread_id == 0) {
      return;
    }
    RecordStackTrace rst(thread);
    // try to enter the critical section
    JfrTryLock tryLock(&_lock);
    if (!tryLock.acquired()) {
      log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
      return;
    }
!   instance().add(obj, allocated, thread_id, thread);
  }
  
! void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JavaThread* thread) {
    assert(obj != NULL, "invariant");
    assert(thread_id != 0, "invariant");
    assert(thread != NULL, "invariant");
-   assert(thread->jfr_thread_local()->has_thread_blob(), "invariant");
  
    if (Atomic::load(&_dead_samples)) {
      // There's a small race where a GC scan might reset this to true, potentially
      // causing a back-to-back scavenge.
      Atomic::store(&_dead_samples, false);
--- 194,31 ---
  };
  
  void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
    assert(thread != NULL, "invariant");
    assert(is_created(), "invariant");
!   bool virtual_thread = false;
+   const traceid thread_id = get_thread_id(thread, &virtual_thread);
    if (thread_id == 0) {
      return;
    }
+   const JfrBlobHandle bh = get_thread_blob(thread, thread_id, virtual_thread);
+   assert(bh.valid(), "invariant");
    RecordStackTrace rst(thread);
    // try to enter the critical section
    JfrTryLock tryLock(&_lock);
    if (!tryLock.acquired()) {
      log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
      return;
    }
!   instance().add(obj, allocated, thread_id, virtual_thread, bh, thread);
  }
  
! void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool virtual_thread, const JfrBlobHandle& bh, JavaThread* thread) {
    assert(obj != NULL, "invariant");
    assert(thread_id != 0, "invariant");
    assert(thread != NULL, "invariant");
  
    if (Atomic::load(&_dead_samples)) {
      // There's a small race where a GC scan might reset this to true, potentially
      // causing a back-to-back scavenge.
      Atomic::store(&_dead_samples, false);

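Note: in the old code, add() pulled the blob out of the sampling thread's locals; with virtual threads that cached entry (if present at all) describes the carrier, so the handle resolved up front in sample() is now threaded through add() explicitly. The shape of the change, condensed from the hunks above and below (not a standalone compilation unit):

  // before: blob looked up inside add(), from the thread-local cache
  sample->set_thread(thread->jfr_thread_local()->thread_blob());

  // after: blob resolved in sample() and passed down as a parameter
  instance().add(obj, allocated, thread_id, virtual_thread, bh, thread);
  // ... and inside add():
  sample->set_thread(bh);
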
*** 224,14 ***
      sample = _list->get();
    }
  
    assert(sample != NULL, "invariant");
    sample->set_thread_id(thread_id);
  
    const JfrThreadLocal* const tl = thread->jfr_thread_local();
-   sample->set_thread(tl->thread_blob());
- 
    const unsigned int stacktrace_hash = tl->cached_stack_trace_hash();
    if (stacktrace_hash != 0) {
      sample->set_stack_trace_id(tl->cached_stack_trace_id());
      sample->set_stack_trace_hash(stacktrace_hash);
    }
--- 240,16 ---
      sample = _list->get();
    }
  
    assert(sample != NULL, "invariant");
    sample->set_thread_id(thread_id);
+   if (virtual_thread) {
+     sample->set_thread_is_virtual();
+   }
+   sample->set_thread(bh);
  
    const JfrThreadLocal* const tl = thread->jfr_thread_local();
    const unsigned int stacktrace_hash = tl->cached_stack_trace_hash();
    if (stacktrace_hash != 0) {
      sample->set_stack_trace_id(tl->cached_stack_trace_id());
      sample->set_stack_trace_hash(stacktrace_hash);
    }
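
Taken together, each sample is now self-describing with respect to its thread: the traceid, a new virtual-thread flag, and the blob captured at sample time, so later checkpoint emission no longer depends on the sampling thread's JfrThreadLocal still holding the matching blob. Condensed view of the per-sample thread state set above (method names as in the patch):

  sample->set_thread_id(thread_id);    // traceid of the platform or virtual thread
  if (virtual_thread) {
    sample->set_thread_is_virtual();   // flag introduced by this change
  }
  sample->set_thread(bh);              // blob handle resolved in sample()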