/*
 * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/oopStorageSet.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/leakprofiler/checkpoint/objectSampleCheckpoint.hpp"
#include "jfr/leakprofiler/sampling/objectSample.hpp"
#include "jfr/leakprofiler/sampling/objectSampler.hpp"
#include "jfr/leakprofiler/sampling/sampleList.hpp"
#include "jfr/leakprofiler/sampling/samplePriorityQueue.hpp"
#include "jfr/recorder/checkpoint/jfrCheckpointManager.hpp"
#include "jfr/recorder/jfrEventSetting.inline.hpp"
#include "jfr/recorder/stacktrace/jfrStackTraceRepository.hpp"
#include "jfr/support/jfrThreadLocal.hpp"
#include "jfr/utilities/jfrTime.hpp"
#include "jfr/utilities/jfrTryLock.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"

// Timestamp of when the gc last processed the set of sampled objects.
// Atomic access to prevent word tearing on 32-bit platforms.
static volatile int64_t _last_sweep;

// Flag to communicate that some sampled objects have been cleared by the gc
// and can therefore be removed from the sample priority queue.
static bool volatile _dead_samples = false;

// The OopStorage instance is used to hold weak references to sampled objects.
// It is constructed and registered during VM initialization. This is a singleton
// that persists independently of the state of the ObjectSampler.
static OopStorage* _oop_storage = NULL;

OopStorage* ObjectSampler::oop_storage() { return _oop_storage; }

// Callback invoked by the GC after an iteration over the oop storage
// that may have cleared dead referents. num_dead is the number of entries
// already NULL or cleared by the iteration.
void ObjectSampler::oop_storage_gc_notification(size_t num_dead) {
  if (num_dead != 0) {
    // The ObjectSampler instance may already have been cleaned, or a new
    // instance may have been created concurrently. This allows for a small
    // race where cleaning could be done again.
    Atomic::store(&_dead_samples, true);
    Atomic::store(&_last_sweep, (int64_t)JfrTicks::now().value());
  }
}

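// Called once during VM initialization to create and register the weak
// OopStorage and to install the GC notification callback.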
bool ObjectSampler::create_oop_storage() {
  _oop_storage = OopStorageSet::create_weak("Weak JFR Old Object Samples", mtTracing);
  assert(_oop_storage != NULL, "invariant");
  _oop_storage->register_num_dead_callback(&oop_storage_gc_notification);
  return true;
}

static ObjectSampler* _instance = NULL;

static ObjectSampler& instance() {
  assert(_instance != NULL, "invariant");
  return *_instance;
}

ObjectSampler::ObjectSampler(size_t size) :
  _priority_queue(new SamplePriorityQueue(size)),
  _list(new SampleList(size)),
  _total_allocated(0),
  _threshold(0),
  _size(size) {
  Atomic::store(&_dead_samples, false);
  Atomic::store(&_last_sweep, (int64_t)JfrTicks::now().value());
}

ObjectSampler::~ObjectSampler() {
  delete _priority_queue;
  _priority_queue = NULL;
  delete _list;
  _list = NULL;
}

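// Installs a new sampler instance. Must run at a safepoint; checkpoint state
// left over from a previous instance is cleared before the new one is created.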
bool ObjectSampler::create(size_t size) {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  assert(_oop_storage != NULL, "should be already created");
  ObjectSampleCheckpoint::clear();
  assert(_instance == NULL, "invariant");
  _instance = new ObjectSampler(size);
  return _instance != NULL;
}

bool ObjectSampler::is_created() {
  return _instance != NULL;
}

ObjectSampler* ObjectSampler::sampler() {
  assert(is_created(), "invariant");
  return _instance;
}

void ObjectSampler::destroy() {
  assert(SafepointSynchronize::is_at_safepoint(), "invariant");
  if (_instance != NULL) {
    ObjectSampler* const sampler = _instance;
    _instance = NULL;
    delete sampler;
  }
}

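// Spin lock serializing access to the sampler instance. The allocation path
// only try-locks (see sample()) so that sampling never blocks an allocating
// thread, while acquire() spins until the lock is granted.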
static volatile int _lock = 0;

ObjectSampler* ObjectSampler::acquire() {
  while (Atomic::cmpxchg(&_lock, 0, 1) == 1) {}
  return _instance;
}

void ObjectSampler::release() {
  OrderAccess::fence();
  _lock = 0;
}
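
// A sketch of the acquire/release protocol (illustrative only):
//
//   ObjectSampler* const sampler = ObjectSampler::acquire();
//   // ... inspect or emit samples while holding the lock ...
//   ObjectSampler::release();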
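// Returns the thread id to associate with the sample, or 0 if the thread
// should not be sampled (it has no Java thread object yet or is excluded
// from JFR).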
static traceid get_thread_id(JavaThread* thread, bool* virtual_thread) {
  assert(thread != NULL, "invariant");
  assert(virtual_thread != NULL, "invariant");
  if (thread->threadObj() == NULL) {
    return 0;
  }
  const JfrThreadLocal* const tl = thread->jfr_thread_local();
  assert(tl != NULL, "invariant");
  if (tl->is_excluded()) {
    return 0;
  }
  return JfrThreadLocal::thread_id(thread, virtual_thread);
}

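// Returns the serialized thread metadata (blob) for the sampling thread.
// For platform threads the blob is cached in the JfrThreadLocal; virtual
// threads are serialized anew on every sample (no cache yet, see TODO below).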
static JfrBlobHandle get_thread_blob(JavaThread* thread, traceid tid, bool virtual_thread) {
  assert(thread != NULL, "invariant");
  JfrThreadLocal* const tl = thread->jfr_thread_local();
  assert(tl != NULL, "invariant");
  assert(!tl->is_excluded(), "invariant");
  if (virtual_thread) {
    // TODO: blob cache for virtual threads
    return JfrCheckpointManager::create_thread_blob(thread, tid, thread->vthread());
  }
  if (!tl->has_thread_blob()) {
    // for regular threads, the blob is cached in the thread local data structure
    tl->set_thread_blob(JfrCheckpointManager::create_thread_blob(thread, tid));
    assert(tl->has_thread_blob(), "invariant");
  }
  return tl->thread_blob();
}

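// RAII helper: on construction, records a stack trace for the leak profiler
// if the OldObjectSample event is configured to include stack traces; on
// destruction, clears the cached trace from the thread-local state.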
class RecordStackTrace {
 private:
  JavaThread* _jt;
  bool _enabled;
 public:
  RecordStackTrace(JavaThread* jt) : _jt(jt),
    _enabled(JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
    if (_enabled) {
      JfrStackTraceRepository::record_for_leak_profiler(jt);
    }
  }
  ~RecordStackTrace() {
    if (_enabled) {
      _jt->jfr_thread_local()->clear_cached_stack_trace();
    }
  }
};

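// Entry point from the allocation path. The thread id and metadata blob are
// resolved before the lock is taken; under contention the sample is dropped
// rather than blocking the allocating thread.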
void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
  assert(thread != NULL, "invariant");
  assert(is_created(), "invariant");
  bool virtual_thread = false;
  const traceid thread_id = get_thread_id(thread, &virtual_thread);
  if (thread_id == 0) {
    return;
  }
  const JfrBlobHandle bh = get_thread_blob(thread, thread_id, virtual_thread);
  assert(bh.valid(), "invariant");
  RecordStackTrace rst(thread);
  // try enter critical section
  JfrTryLock tryLock(&_lock);
  if (!tryLock.acquired()) {
    log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
    return;
  }
  instance().add(obj, allocated, thread_id, virtual_thread, bh, thread);
}

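// Adds a sample while holding the lock. When the priority queue is full, the
// incoming sample is admitted only if its span is at least as large as the
// smallest span currently in the queue; otherwise it is rejected outright.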
void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool virtual_thread, const JfrBlobHandle& bh, JavaThread* thread) {
  assert(obj != NULL, "invariant");
  assert(thread_id != 0, "invariant");
  assert(thread != NULL, "invariant");

  if (Atomic::load(&_dead_samples)) {
    // There's a small race where a GC scan might reset this to true, potentially
    // causing a back-to-back scavenge.
    Atomic::store(&_dead_samples, false);
    scavenge();
  }

  _total_allocated += allocated;
  const size_t span = _total_allocated - _priority_queue->total();
  ObjectSample* sample;
  if ((size_t)_priority_queue->count() == _size) {
    assert(_list->count() == _size, "invariant");
    const ObjectSample* peek = _priority_queue->peek();
    if (peek->span() > span) {
      // quick reject, will not fit
      return;
    }
    sample = _list->reuse(_priority_queue->pop());
  } else {
    sample = _list->get();
  }

  assert(sample != NULL, "invariant");
  sample->set_thread_id(thread_id);
  if (virtual_thread) {
    sample->set_thread_is_virtual();
  }
  sample->set_thread(bh);

  const JfrThreadLocal* const tl = thread->jfr_thread_local();
  const unsigned int stacktrace_hash = tl->cached_stack_trace_hash();
  if (stacktrace_hash != 0) {
    sample->set_stack_trace_id(tl->cached_stack_trace_id());
    sample->set_stack_trace_hash(stacktrace_hash);
  }

  sample->set_span(allocated);
  sample->set_object(cast_to_oop(obj));
  sample->set_allocated(allocated);
  sample->set_allocation_time(JfrTicks::now());
  sample->set_heap_used_at_last_gc(Universe::heap()->used_at_last_gc());
  _priority_queue->push(sample);
}

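// Walks the sample list and removes entries whose weak referents have been
// cleared by the gc.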
void ObjectSampler::scavenge() {
  ObjectSample* current = _list->last();
  while (current != NULL) {
    ObjectSample* next = current->next();
    if (current->is_dead()) {
      remove_dead(current);
    }
    current = next;
  }
}

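// Removes a dead sample. Its span is folded into the previous sample so that
// the total span accounted for by the priority queue is preserved.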
void ObjectSampler::remove_dead(ObjectSample* sample) {
  assert(sample != NULL, "invariant");
  assert(sample->is_dead(), "invariant");
  sample->release();

  ObjectSample* const previous = sample->prev();
  // push span onto previous
  if (previous != NULL) {
    _priority_queue->remove(previous);
    previous->add_span(sample->span());
    _priority_queue->push(previous);
  }
  _priority_queue->remove(sample);
  _list->release(sample);
}

ObjectSample* ObjectSampler::last() const {
  return _list->last();
}

const ObjectSample* ObjectSampler::first() const {
  return _list->first();
}

const ObjectSample* ObjectSampler::last_resolved() const {
  return _list->last_resolved();
}

void ObjectSampler::set_last_resolved(const ObjectSample* sample) {
  _list->set_last_resolved(sample);
}

int ObjectSampler::item_count() const {
  return _priority_queue->count();
}

const ObjectSample* ObjectSampler::item_at(int index) const {
  return _priority_queue->item_at(index);
}

ObjectSample* ObjectSampler::item_at(int index) {
  return const_cast<ObjectSample*>(const_cast<const ObjectSampler*>(this)->item_at(index));
}

int64_t ObjectSampler::last_sweep() {
  return Atomic::load(&_last_sweep);
}