1 /*
  2  * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "jfr/jfrEvents.hpp"
 27 #include "jfr/recorder/jfrRecorder.hpp"
 28 #include "jfr/periodic/sampling/jfrCallTrace.hpp"
 29 #include "jfr/periodic/sampling/jfrThreadSampler.hpp"
 30 #include "jfr/recorder/checkpoint/types/traceid/jfrTraceIdLoadBarrier.inline.hpp"
 31 #include "jfr/recorder/service/jfrOptionSet.hpp"
 32 #include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
 33 #include "jfr/recorder/storage/jfrBuffer.hpp"
 34 #include "jfr/support/jfrThreadId.hpp"
 35 #include "jfr/support/jfrThreadLocal.hpp"
 36 #include "jfr/utilities/jfrTime.hpp"
 37 #include "jfrfiles/jfrEventClasses.hpp"
 38 #include "logging/log.hpp"
 39 #include "runtime/frame.inline.hpp"
 40 #include "runtime/os.hpp"
 41 #include "runtime/semaphore.hpp"
 42 #include "runtime/thread.inline.hpp"
 43 #include "runtime/threadSMR.hpp"
 44 
// Discriminates the kind of sample taken during a sampling round.
enum JfrSampleType {
  NO_SAMPLE = 0,
  JAVA_SAMPLE = 1,   // thread was executing Java code (_thread_in_Java)
  NATIVE_SAMPLE = 2  // thread was executing native code (_thread_in_native)
};
 50 
 51 static bool thread_state_in_java(JavaThread* thread) {
 52   assert(thread != NULL, "invariant");
 53   switch(thread->thread_state()) {
 54     case _thread_new:
 55     case _thread_uninitialized:
 56     case _thread_new_trans:
 57     case _thread_in_vm_trans:
 58     case _thread_blocked_trans:
 59     case _thread_in_native_trans:
 60     case _thread_blocked:
 61     case _thread_in_vm:
 62     case _thread_in_native:
 63     case _thread_in_Java_trans:
 64       break;
 65     case _thread_in_Java:
 66       return true;
 67     default:
 68       ShouldNotReachHere();
 69       break;
 70   }
 71   return false;
 72 }
 73 
 74 static bool thread_state_in_native(JavaThread* thread) {
 75   assert(thread != NULL, "invariant");
 76   switch(thread->thread_state()) {
 77     case _thread_new:
 78     case _thread_uninitialized:
 79     case _thread_new_trans:
 80     case _thread_blocked_trans:
 81     case _thread_blocked:
 82     case _thread_in_vm:
 83     case _thread_in_vm_trans:
 84     case _thread_in_Java_trans:
 85     case _thread_in_Java:
 86     case _thread_in_native_trans:
 87       break;
 88     case _thread_in_native:
 89       return true;
 90     default:
 91       ShouldNotReachHere();
 92       break;
 93   }
 94   return false;
 95 }
 96 
// Collects ExecutionSample (Java) and NativeMethodSample events for one
// sampling round. Events are written into caller-provided, stack-allocated
// arrays so that no heap allocation happens while a target thread is stopped.
class JfrThreadSampleClosure {
 public:
  JfrThreadSampleClosure(EventExecutionSample* events, EventNativeMethodSample* events_native);
  ~JfrThreadSampleClosure() {}
  // Hand out the next free slot in the respective event array. Callers are
  // responsible for staying within the MAX_NR_OF_*_SAMPLES bounds.
  EventExecutionSample* next_event() { return &_events[_added_java++]; }
  EventNativeMethodSample* next_event_native() { return &_events_native[_added_native++]; }
  // Commit all collected events of the given type (called after sampling).
  void commit_events(JfrSampleType type);
  // Attempt to sample one thread; returns true if an event was recorded.
  bool do_sample_thread(JavaThread* thread, JfrStackFrame* frames, u4 max_frames, JfrSampleType type);
  uint java_entries() { return _added_java; }
  uint native_entries() { return _added_native; }

 private:
  bool sample_thread_in_java(JavaThread* thread, JfrStackFrame* frames, u4 max_frames);
  bool sample_thread_in_native(JavaThread* thread, JfrStackFrame* frames, u4 max_frames);
  EventExecutionSample* _events;           // backing array for Java samples (not owned)
  EventNativeMethodSample* _events_native; // backing array for native samples (not owned)
  Thread* _self;                           // the thread running the sampling round
  uint _added_java;                        // number of Java event slots handed out
  uint _added_native;                      // number of native event slots handed out
};
117 
// os::SuspendedThreadTask that records the stack of a Java thread while it is
// suspended. All sampling state is stack-allocated by the caller; the actual
// work runs in protected_task(), optionally under crash protection.
class OSThreadSampler : public os::SuspendedThreadTask {
 public:
  OSThreadSampler(JavaThread* thread,
                  JfrThreadSampleClosure& closure,
                  JfrStackFrame *frames,
                  u4 max_frames) : os::SuspendedThreadTask((Thread*)thread),
    _success(false),
    _thread_oop(thread->threadObj()),
    _stacktrace(frames, max_frames),
    _closure(closure),
    _suspend_time() {}

  // Runs the suspend-sample-resume cycle (delegates to run()).
  void take_sample();
  // Callback invoked by the suspend machinery while the target is stopped.
  void do_task(const os::SuspendedThreadTaskContext& context);
  // The sampling work itself; run under crash protection when enabled.
  void protected_task(const os::SuspendedThreadTaskContext& context);
  bool success() const { return _success; }
  const JfrStackTrace& stacktrace() const { return _stacktrace; }

 private:
  bool _success;              // true once a stacktrace was recorded and an event filled in
  oop _thread_oop;            // java.lang.Thread mirror, captured before suspension
  JfrStackTrace _stacktrace;  // wraps the caller-provided frame buffer
  JfrThreadSampleClosure& _closure;
  JfrTicks _suspend_time;     // timestamp taken right after suspension (see do_task)
};
143 
144 class OSThreadSamplerCallback : public os::CrashProtectionCallback {
145  public:
146   OSThreadSamplerCallback(OSThreadSampler& sampler, const os::SuspendedThreadTaskContext &context) :
147     _sampler(sampler), _context(context) {
148   }
149   virtual void call() {
150     _sampler.protected_task(_context);
151   }
152  private:
153   OSThreadSampler& _sampler;
154   const os::SuspendedThreadTaskContext& _context;
155 };
156 
157 void OSThreadSampler::do_task(const os::SuspendedThreadTaskContext& context) {
158 #ifndef ASSERT
159   guarantee(JfrOptionSet::sample_protection(), "Sample Protection should be on in product builds");
160 #endif
161   assert(_suspend_time.value() == 0, "already timestamped!");
162   _suspend_time = JfrTicks::now();
163 
164   if (JfrOptionSet::sample_protection()) {
165     OSThreadSamplerCallback cb(*this, context);
166     os::ThreadCrashProtection crash_protection;
167     if (!crash_protection.call(cb)) {
168       log_error(jfr)("Thread method sampler crashed");
169     }
170   } else {
171     protected_task(context);
172   }
173 }
174 
175 /*
176 * From this method and down the call tree we attempt to protect against crashes
177 * using a signal handler / __try block. Don't take locks, rely on destructors or
178 * leave memory (in case of signal / exception) in an inconsistent state. */
179 void OSThreadSampler::protected_task(const os::SuspendedThreadTaskContext& context) {
180   JavaThread* jth = JavaThread::cast(context.thread());
181   // Skip sample if we signaled a thread that moved to other state
182   if (!thread_state_in_java(jth)) {
183     return;
184   }
185   JfrGetCallTrace trace(true, jth);
186   frame topframe;
187   if (trace.get_topframe(context.ucontext(), topframe)) {
188     if (_stacktrace.record_thread(*jth, topframe)) {
189       /* If we managed to get a topframe and a stacktrace, create an event
190       * and put it into our array. We can't call Jfr::_stacktraces.add()
191       * here since it would allocate memory using malloc. Doing so while
192       * the stopped thread is inside malloc would deadlock. */
193       _success = true;
194       EventExecutionSample *ev = _closure.next_event();
195       ev->set_starttime(_suspend_time);
196       ev->set_endtime(_suspend_time); // fake to not take an end time
197       ev->set_sampledThread(JFR_THREAD_ID(jth));
198       ev->set_state(static_cast<u8>(java_lang_Thread::get_thread_status(_thread_oop)));
199     }
200   }
201 }
202 
// Delegates to os::SuspendedThreadTask::run(), which invokes do_task() with
// the target thread suspended and resumes it afterwards.
void OSThreadSampler::take_sample() {
  run();
}
206 
// Crash-protection callback that records a NativeMethodSample for _jt.
// Unlike the Java case (OSThreadSampler), the target thread is not suspended;
// its last Java frame is walked directly (see call()).
class JfrNativeSamplerCallback : public os::CrashProtectionCallback {
 public:
  JfrNativeSamplerCallback(JfrThreadSampleClosure& closure, JavaThread* jt, JfrStackFrame* frames, u4 max_frames) :
    _closure(closure), _jt(jt), _thread_oop(jt->threadObj()), _stacktrace(frames, max_frames), _success(false) {
  }
  virtual void call();
  bool success() { return _success; }
  JfrStackTrace& stacktrace() { return _stacktrace; }

 private:
  JfrThreadSampleClosure& _closure;
  JavaThread* _jt;            // thread being sampled
  oop _thread_oop;            // its java.lang.Thread mirror
  JfrStackTrace _stacktrace;  // wraps the caller-provided frame buffer
  bool _success;              // set once a stacktrace has been recorded
};
223 
224 static void write_native_event(JfrThreadSampleClosure& closure, JavaThread* jt, oop thread_oop) {
225   EventNativeMethodSample *ev = closure.next_event_native();
226   ev->set_starttime(JfrTicks::now());
227   ev->set_sampledThread(JFR_THREAD_ID(jt));
228   ev->set_state(static_cast<u8>(java_lang_Thread::get_thread_status(thread_oop)));
229 }
230 
// Records a native sample for _jt. May run under crash protection, so it must
// not take locks or allocate (see the comment above OSThreadSampler::protected_task).
void JfrNativeSamplerCallback::call() {
  // When a thread is only attached it will be native without a last java frame
  if (!_jt->has_last_Java_frame()) {
    return;
  }

  // Walk from the last frame to the first Java frame; bail out if no Java
  // method can be resolved.
  frame topframe = _jt->last_frame();
  frame first_java_frame;
  Method* method = NULL;
  JfrGetCallTrace gct(false, _jt);
  if (!gct.find_top_frame(topframe, &method, first_java_frame)) {
    return;
  }
  if (method == NULL) {
    return;
  }
  topframe = first_java_frame;
  _success = _stacktrace.record_thread(*_jt, topframe);
  if (_success) {
    write_native_event(_closure, _jt, _thread_oop);
  }
}
253 
// Samples a thread executing Java code by suspending it and recording its
// stack. Returns true if an ExecutionSample event was filled in and its
// stacktrace stored in the repository.
bool JfrThreadSampleClosure::sample_thread_in_java(JavaThread* thread, JfrStackFrame* frames, u4 max_frames) {
  OSThreadSampler sampler(thread, *this, frames, max_frames);
  sampler.take_sample();
  /* We don't want to allocate any memory using malloc/etc while the thread
  * is stopped, so everything is stored in stack allocated memory until this
  * point where the thread has been resumed again, if the sampling was a success
  * we need to store the stacktrace in the stacktrace repository and update
  * the event with the id that was returned. */
  if (!sampler.success()) {
    return false;
  }
  // The slot most recently handed out by next_event() during protected_task().
  EventExecutionSample *event = &_events[_added_java - 1];
  traceid id = JfrStackTraceRepository::add(sampler.stacktrace());
  assert(id != 0, "Stacktrace id should not be 0");
  event->set_stackTrace(id);
  return true;
}
271 
272 bool JfrThreadSampleClosure::sample_thread_in_native(JavaThread* thread, JfrStackFrame* frames, u4 max_frames) {
273   JfrNativeSamplerCallback cb(*this, thread, frames, max_frames);
274   if (JfrOptionSet::sample_protection()) {
275     os::ThreadCrashProtection crash_protection;
276     if (!crash_protection.call(cb)) {
277       log_error(jfr)("Thread method sampler crashed for native");
278     }
279   } else {
280     cb.call();
281   }
282   if (!cb.success()) {
283     return false;
284   }
285   EventNativeMethodSample *event = &_events_native[_added_native - 1];
286   traceid id = JfrStackTraceRepository::add(cb.stacktrace());
287   assert(id != 0, "Stacktrace id should not be 0");
288   event->set_stackTrace(id);
289   return true;
290 }
291 
// Upper bounds on the number of events gathered per sampling round; the event
// arrays in task_stacktrace() are stack-allocated with these sizes.
static const uint MAX_NR_OF_JAVA_SAMPLES = 5;
static const uint MAX_NR_OF_NATIVE_SAMPLES = 1;
294 
295 void JfrThreadSampleClosure::commit_events(JfrSampleType type) {
296   if (JAVA_SAMPLE == type) {
297     assert(_added_java > 0 && _added_java <= MAX_NR_OF_JAVA_SAMPLES, "invariant");
298     for (uint i = 0; i < _added_java; ++i) {
299       _events[i].commit();
300     }
301   } else {
302     assert(NATIVE_SAMPLE == type, "invariant");
303     assert(_added_native > 0 && _added_native <= MAX_NR_OF_NATIVE_SAMPLES, "invariant");
304     for (uint i = 0; i < _added_native; ++i) {
305       _events_native[i].commit();
306     }
307   }
308 }
309 
// The event arrays are owned by the caller (stack-allocated in
// task_stacktrace()); this closure only tracks how many slots were used.
JfrThreadSampleClosure::JfrThreadSampleClosure(EventExecutionSample* events, EventNativeMethodSample* events_native) :
  _events(events),
  _events_native(events_native),
  _self(Thread::current()),
  _added_java(0),
  _added_native(0) {
}
317 
// The dedicated OS thread that periodically samples Java threads' stacks.
// Created lazily by JfrThreadSampling; sampling is toggled via
// enroll()/disenroll(), implemented with the _sample semaphore (see run()).
class JfrThreadSampler : public NonJavaThread {
  friend class JfrThreadSampling;
 private:
  Semaphore _sample;                // signalled while enrolled; see run()/enroll()/disenroll()
  Thread* _sampler_thread;          // set to 'this' when run() starts
  JfrStackFrame* const _frames;     // C-heap frame buffer reused across rounds (_max_frames entries)
  JavaThread* _last_thread_java;    // round-robin resume point for Java samples
  JavaThread* _last_thread_native;  // round-robin resume point for native samples
  size_t _interval_java;            // sampling periods in ms; 0 disables the type
  size_t _interval_native;
  const size_t _min_size; // for enqueue buffer monitoring
  const size_t _renew_size;
  int _cur_index;                   // cursor into the current ThreadsList; -1 when unset
  const u4 _max_frames;
  volatile bool _disenrolled;

  const JfrBuffer* get_enqueue_buffer();
  const JfrBuffer* renew_if_full(const JfrBuffer* enqueue_buffer);

  JavaThread* next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current);
  void task_stacktrace(JfrSampleType type, JavaThread** last_thread);
  JfrThreadSampler(size_t interval_java, size_t interval_native, u4 max_frames);
  ~JfrThreadSampler();

  void start_thread();

  void enroll();
  void disenroll();
  void set_java_interval(size_t interval) { _interval_java = interval; };
  void set_native_interval(size_t interval) { _interval_native = interval; };
  size_t get_java_interval() { return _interval_java; };
  size_t get_native_interval() { return _interval_native; };
 protected:
  virtual void post_run();
 public:
  virtual const char* name() const { return "JFR Thread Sampler"; }
  virtual const char* type_name() const { return "JfrThreadSampler"; }
  bool is_JfrSampler_thread() const { return true; }
  void run();
  // Monitor on which sampled threads block during state transitions.
  static Monitor* transition_block() { return JfrThreadSampler_lock; }
  static void on_javathread_suspend(JavaThread* thread);
};
360 
// Clears the trace (suspend) flag for jt and, if jt is parked in
// JfrThreadSampler::on_javathread_suspend() waiting on the transition block
// monitor, wakes it up.
static void clear_transition_block(JavaThread* jt) {
  assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
  jt->clear_trace_flag();
  JfrThreadLocal* const tl = jt->jfr_thread_local();
  MutexLocker ml(JfrThreadSampler::transition_block(), Mutex::_no_safepoint_check_flag);
  if (tl->is_trace_block()) {
    JfrThreadSampler::transition_block()->notify();
  }
}
370 
371 static bool is_excluded(JavaThread* thread) {
372   assert(thread != NULL, "invariant");
373   return thread->is_hidden_from_external_view() || thread->in_deopt_handler() || thread->jfr_thread_local()->is_excluded();
374 }
375 
// Attempts to sample one thread of the given type. The trace flag is set for
// the duration of the sample; a flagged thread that attempts a state
// transition blocks in JfrThreadSampler::on_javathread_suspend() until
// clear_transition_block() releases it. Returns true if an event was recorded.
bool JfrThreadSampleClosure::do_sample_thread(JavaThread* thread, JfrStackFrame* frames, u4 max_frames, JfrSampleType type) {
  assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
  if (is_excluded(thread)) {
    return false;
  }

  bool ret = false;
  thread->set_trace_flag();  // Provides StoreLoad, needed to keep read of thread state from floating up.
  if (JAVA_SAMPLE == type) {
    if (thread_state_in_java(thread)) {
      ret = sample_thread_in_java(thread, frames, max_frames);
    }
  } else {
    assert(NATIVE_SAMPLE == type, "invariant");
    if (thread_state_in_native(thread)) {
      ret = sample_thread_in_native(thread, frames, max_frames);
    }
  }
  clear_transition_block(thread);
  return ret;
}
397 
// The sampler starts disenrolled; enroll() must be called to begin sampling.
// _frames is a C-heap array sized for the configured stack depth and reused
// across all sampling rounds; it is freed in the destructor.
JfrThreadSampler::JfrThreadSampler(size_t interval_java, size_t interval_native, u4 max_frames) :
  _sample(),
  _sampler_thread(NULL),
  _frames(JfrCHeapObj::new_array<JfrStackFrame>(max_frames)),
  _last_thread_java(NULL),
  _last_thread_native(NULL),
  _interval_java(interval_java),
  _interval_native(interval_native),
  _min_size(JfrOptionSet::stackdepth() * sizeof(intptr_t)),
  _renew_size(_min_size * 2),
  _cur_index(-1),
  _max_frames(max_frames),
  _disenrolled(true) {
}
412 
// Releases the C-heap frame buffer allocated in the constructor.
JfrThreadSampler::~JfrThreadSampler() {
  JfrCHeapObj::free(_frames, sizeof(JfrStackFrame) * _max_frames);
}
416 
// A thread is "released" once the sampler has cleared its trace flag
// (see clear_transition_block()).
static inline bool is_released(JavaThread* jt) {
  return !jt->is_trace_suspend();
}
420 
// Called on a JavaThread that observed its trace flag during a state
// transition: park it on the transition_block() monitor until the sampler has
// finished with it (clear_transition_block() clears the flag and notifies).
void JfrThreadSampler::on_javathread_suspend(JavaThread* thread) {
  if (is_released(thread)) {
    return;
  }
  JfrThreadLocal* const tl = thread->jfr_thread_local();
  MonitorLocker ml(transition_block(), Mutex::_no_safepoint_check_flag);
  tl->set_trace_block();
  // Re-check under the monitor; the flag may have been cleared before we
  // acquired it, and wait() can wake spuriously.
  while (!is_released(thread)) {
    ml.wait();
  }
  tl->clear_trace_block();
}
433 
// Advances the round-robin cursor (_cur_index) over the thread list, wrapping
// at the end, and returns the next candidate thread — or NULL once the
// iteration has come back around to the thread where sampling started.
JavaThread* JfrThreadSampler::next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current) {
  assert(t_list != NULL, "invariant");
  assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
  assert(_cur_index >= -1 && (uint)_cur_index + 1 <= t_list->length(), "invariant");
  assert((current == NULL && -1 == _cur_index) || (t_list->find_index_of_JavaThread(current) == _cur_index), "invariant");
  if ((uint)_cur_index + 1 == t_list->length()) {
    // wrap
    _cur_index = 0;
  } else {
    _cur_index++;
  }
  assert(_cur_index >= 0 && (uint)_cur_index < t_list->length(), "invariant");
  JavaThread* const next = t_list->thread_at(_cur_index);
  return next != first_sampled ? next : NULL;
}
449 
450 void JfrThreadSampler::start_thread() {
451   if (os::create_thread(this, os::os_thread)) {
452     os::start_thread(this);
453   } else {
454     log_error(jfr)("Failed to create thread for thread sampling");
455   }
456 }
457 
458 void JfrThreadSampler::enroll() {
459   if (_disenrolled) {
460     log_trace(jfr)("Enrolling thread sampler");
461     _sample.signal();
462     _disenrolled = false;
463   }
464 }
465 
466 void JfrThreadSampler::disenroll() {
467   if (!_disenrolled) {
468     _sample.wait();
469     _disenrolled = true;
470     log_trace(jfr)("Disenrolling thread sampler");
471   }
472 }
473 
474 static jlong get_monotonic_ms() {
475   return os::javaTimeNanos() / 1000000;
476 }
477 
// Sampler thread main loop. The _sample semaphore gates enrollment: while
// disenrolled, trywait() fails and the loop parks in wait() until enroll()
// posts a token; the token is immediately re-signalled so that disenroll()
// can reclaim it to stop sampling.
void JfrThreadSampler::run() {
  assert(_sampler_thread == NULL, "invariant");

  _sampler_thread = this;

  jlong last_java_ms = get_monotonic_ms();
  jlong last_native_ms = last_java_ms;
  while (true) {
    if (!_sample.trywait()) {
      // disenrolled
      _sample.wait();
      // Reset the baselines so no samples appear due immediately after
      // (re)enrollment.
      last_java_ms = get_monotonic_ms();
      last_native_ms = last_java_ms;
    }
    _sample.signal();
    // An interval of 0 means that sample type is disabled: push its next
    // deadline out to "infinity" (max_jlong).
    jlong java_interval = _interval_java == 0 ? max_jlong : MAX2<jlong>(_interval_java, 1);
    jlong native_interval = _interval_native == 0 ? max_jlong : MAX2<jlong>(_interval_native, 1);

    jlong now_ms = get_monotonic_ms();

    /*
     * Let I be java_interval or native_interval.
     * Let L be last_java_ms or last_native_ms.
     * Let N be now_ms.
     *
     * Interval, I, might be max_jlong so the addition
     * could potentially overflow without parenthesis (UB). Also note that
     * L - N < 0. Avoid UB, by adding parenthesis.
     */
    jlong next_j = java_interval + (last_java_ms - now_ms);
    jlong next_n = native_interval + (last_native_ms - now_ms);

    jlong sleep_to_next = MIN2<jlong>(next_j, next_n);

    if (sleep_to_next > 0) {
      os::naked_short_sleep(sleep_to_next);
    }

    // Take whichever sample type(s) reached its deadline during the sleep.
    if ((next_j - sleep_to_next) <= 0) {
      task_stacktrace(JAVA_SAMPLE, &_last_thread_java);
      last_java_ms = get_monotonic_ms();
    }
    if ((next_n - sleep_to_next) <= 0) {
      task_stacktrace(NATIVE_SAMPLE, &_last_thread_native);
      last_native_ms = get_monotonic_ms();
    }
  }
}
526 
// Thread epilogue: the sampler is heap-allocated (see
// JfrThreadSampling::start_sampler) and deletes itself once the OS thread
// has finished running.
void JfrThreadSampler::post_run() {
  this->NonJavaThread::post_run();
  delete this;
}
531 
532 const JfrBuffer* JfrThreadSampler::get_enqueue_buffer() {
533   const JfrBuffer* buffer = JfrTraceIdLoadBarrier::get_enqueue_buffer(this);
534   return buffer != nullptr ? renew_if_full(buffer) : JfrTraceIdLoadBarrier::renew_enqueue_buffer(_renew_size, this);
535 }
536 
537 const JfrBuffer* JfrThreadSampler::renew_if_full(const JfrBuffer* enqueue_buffer) {
538   assert(enqueue_buffer != nullptr, "invariant");
539   return enqueue_buffer->free_size() < _min_size ? JfrTraceIdLoadBarrier::renew_enqueue_buffer(_renew_size, this) : enqueue_buffer;
540 }
541 
// Performs one sampling round of the given type: under the Threads_lock,
// iterates the thread list round-robin starting after *last_thread, samples
// up to the per-type limit, then commits the collected events after the lock
// has been released.
void JfrThreadSampler::task_stacktrace(JfrSampleType type, JavaThread** last_thread) {
  ResourceMark rm;
  // Events are stack-allocated so nothing is heap-allocated while a target
  // thread is suspended; see JfrThreadSampleClosure.
  EventExecutionSample samples[MAX_NR_OF_JAVA_SAMPLES];
  EventNativeMethodSample samples_native[MAX_NR_OF_NATIVE_SAMPLES];
  JfrThreadSampleClosure sample_task(samples, samples_native);

  const uint sample_limit = JAVA_SAMPLE == type ? MAX_NR_OF_JAVA_SAMPLES : MAX_NR_OF_NATIVE_SAMPLES;
  uint num_samples = 0;
  JavaThread* start = NULL;
  {
    elapsedTimer sample_time;
    sample_time.start();
    {
      MutexLocker tlock(Threads_lock);
      ThreadsListHandle tlh;
      // Resolve a sample session relative start position index into the thread list array.
      // In cases where the last sampled thread is NULL or not-NULL but stale, find_index() returns -1.
      _cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
      JavaThread* current = _cur_index != -1 ? *last_thread : NULL;

      // Explicitly monitor the available space of the thread-local buffer used by the load barrier
      // for enqueuing klasses as part of tagging methods. We do this because if space becomes sparse,
      // we cannot rely on the implicit allocation of a new buffer as part of the regular tag mechanism.
      // If the free list is empty, a malloc could result, and the problem with that is that the thread
      // we have suspended could be the holder of the malloc lock. Instead, the buffer is pre-emptively
      // renewed before thread suspension.
      const JfrBuffer* enqueue_buffer = get_enqueue_buffer();
      assert(enqueue_buffer != nullptr, "invariant");

      while (num_samples < sample_limit) {
        current = next_thread(tlh.list(), start, current);
        if (current == NULL) {
          break;
        }
        if (start == NULL) {
          start = current;  // remember the thread where we started to attempt sampling
        }
        if (current->is_Compiler_thread()) {
          continue;
        }
        assert(enqueue_buffer->free_size() >= _min_size, "invariant");
        if (sample_task.do_sample_thread(current, _frames, _max_frames, type)) {
          num_samples++;
        }
        enqueue_buffer = renew_if_full(enqueue_buffer);
      }
      *last_thread = current;  // remember the thread we last attempted to sample
    }
    sample_time.stop();
    log_trace(jfr)("JFR thread sampling done in %3.7f secs with %d java %d native samples",
                   sample_time.seconds(), sample_task.java_entries(), sample_task.native_entries());
  }
  // Commit outside the Threads_lock scope.
  if (num_samples > 0) {
    sample_task.commit_events(type);
  }
}
598 
// Singleton instance, managed by JfrThreadSampling::create()/destroy().
static JfrThreadSampling* _instance = NULL;
600 
// Precondition: create() has been called; dereferences the singleton.
JfrThreadSampling& JfrThreadSampling::instance() {
  return *_instance;
}
604 
// Installs the singleton; must not be called while an instance exists
// (asserted).
JfrThreadSampling* JfrThreadSampling::create() {
  assert(_instance == NULL, "invariant");
  _instance = new JfrThreadSampling();
  return _instance;
}
610 
611 void JfrThreadSampling::destroy() {
612   if (_instance != NULL) {
613     delete _instance;
614     _instance = NULL;
615   }
616 }
617 
// The sampler thread is created lazily on the first non-zero interval
// (see set_sampling_interval()).
JfrThreadSampling::JfrThreadSampling() : _sampler(NULL) {}
619 
620 JfrThreadSampling::~JfrThreadSampling() {
621   if (_sampler != NULL) {
622     _sampler->disenroll();
623   }
624 }
625 
626 static void log(size_t interval_java, size_t interval_native) {
627   log_trace(jfr)("Updated thread sampler for java: " SIZE_FORMAT "  ms, native " SIZE_FORMAT " ms", interval_java, interval_native);
628 }
629 
// Creates, starts and enrolls the sampler thread.
// Precondition: no sampler exists yet (asserted).
void JfrThreadSampling::start_sampler(size_t interval_java, size_t interval_native) {
  assert(_sampler == NULL, "invariant");
  log_trace(jfr)("Enrolling thread sampler");
  _sampler = new JfrThreadSampler(interval_java, interval_native, JfrOptionSet::stackdepth());
  _sampler->start_thread();
  _sampler->enroll();
}
637 
638 void JfrThreadSampling::set_sampling_interval(bool java_interval, size_t period) {
639   size_t interval_java = 0;
640   size_t interval_native = 0;
641   if (_sampler != NULL) {
642     interval_java = _sampler->get_java_interval();
643     interval_native = _sampler->get_native_interval();
644   }
645   if (java_interval) {
646     interval_java = period;
647   } else {
648     interval_native = period;
649   }
650   if (interval_java > 0 || interval_native > 0) {
651     if (_sampler == NULL) {
652       log_trace(jfr)("Creating thread sampler for java:%zu ms, native %zu ms", interval_java, interval_native);
653       start_sampler(interval_java, interval_native);
654     } else {
655       _sampler->set_java_interval(interval_java);
656       _sampler->set_native_interval(interval_native);
657       _sampler->enroll();
658     }
659     assert(_sampler != NULL, "invariant");
660     log(interval_java, interval_native);
661   } else if (_sampler != NULL) {
662     _sampler->disenroll();
663   }
664 }
665 
666 void JfrThreadSampling::set_java_sample_interval(size_t period) {
667   if (_instance == NULL && 0 == period) {
668     return;
669   }
670   instance().set_sampling_interval(true, period);
671 }
672 
673 void JfrThreadSampling::set_native_sample_interval(size_t period) {
674   if (_instance == NULL && 0 == period) {
675     return;
676   }
677   instance().set_sampling_interval(false, period);
678 }
679 
// Forwards to JfrThreadSampler::on_javathread_suspend(), which blocks the
// thread while it is flagged for sampling.
void JfrThreadSampling::on_javathread_suspend(JavaThread* thread) {
  JfrThreadSampler::on_javathread_suspend(thread);
}