src/hotspot/share/jfr/leakprofiler/sampling/objectSampler.cpp

127   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
128   if (_instance != NULL) {
129     ObjectSampler* const sampler = _instance;
130     _instance = NULL;
131     delete sampler;
132   }
133 }
134 
135 static volatile int _lock = 0;
136 
137 ObjectSampler* ObjectSampler::acquire() {
138   while (Atomic::cmpxchg(&_lock, 0, 1) == 1) {}
139   return _instance;
140 }
141 
142 void ObjectSampler::release() {
143   OrderAccess::fence();
144   _lock = 0;
145 }
146 
147 static traceid get_thread_id(JavaThread* thread) {
148   assert(thread != NULL, "invariant");

149   if (thread->threadObj() == NULL) {
150     return 0;
151   }
152   const JfrThreadLocal* const tl = thread->jfr_thread_local();
153   assert(tl != NULL, "invariant");
154   if (tl->is_excluded()) {
155     return 0;
156   }
157   if (!tl->has_thread_blob()) {
158     JfrCheckpointManager::create_thread_blob(thread);
159   }
160   assert(tl->has_thread_blob(), "invariant");
161   return tl->thread_id();
162 }
163 
164 class RecordStackTrace {
165  private:
166   JavaThread* _jt;
167   bool _enabled;
168  public:
169   RecordStackTrace(JavaThread* jt) : _jt(jt),
170     _enabled(JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
171     if (_enabled) {
172       JfrStackTraceRepository::record_for_leak_profiler(jt);
173     }
174   }
175   ~RecordStackTrace() {
176     if (_enabled) {
177       _jt->jfr_thread_local()->clear_cached_stack_trace();
178     }
179   }
180 };
181 
182 void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
183   assert(thread != NULL, "invariant");
184   assert(is_created(), "invariant");
185   const traceid thread_id = get_thread_id(thread);

186   if (thread_id == 0) {
187     return;
188   }
189   RecordStackTrace rst(thread);
190   // try enter critical section
191   JfrTryLock tryLock(&_lock);
192   if (!tryLock.acquired()) {
193     log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
194     return;
195   }
196   instance().add(obj, allocated, thread_id, thread);
197 }
198 
199 void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, JavaThread* thread) {
200   assert(obj != NULL, "invariant");
201   assert(thread_id != 0, "invariant");
202   assert(thread != NULL, "invariant");
203   assert(thread->jfr_thread_local()->has_thread_blob(), "invariant");
204 
205   if (Atomic::load(&_dead_samples)) {
206     // There's a small race where a GC scan might reset this to true, potentially
207     // causing a back-to-back scavenge.
208     Atomic::store(&_dead_samples, false);
209     scavenge();
210   }
211 
212   _total_allocated += allocated;
213   const size_t span = _total_allocated - _priority_queue->total();
214   ObjectSample* sample;
215   if ((size_t)_priority_queue->count() == _size) {
216     assert(_list->count() == _size, "invariant");
217     const ObjectSample* peek = _priority_queue->peek();
218     if (peek->span() > span) {
219       // quick reject, will not fit
220       return;
221     }
222     sample = _list->reuse(_priority_queue->pop());
223   } else {
224     sample = _list->get();
225   }
226 
227   assert(sample != NULL, "invariant");
228   sample->set_thread_id(thread_id);
229 
230   const JfrThreadLocal* const tl = thread->jfr_thread_local();
231   sample->set_thread(tl->thread_blob());
232 
233   const unsigned int stacktrace_hash = tl->cached_stack_trace_hash();
234   if (stacktrace_hash != 0) {
235     sample->set_stack_trace_id(tl->cached_stack_trace_id());
236     sample->set_stack_trace_hash(stacktrace_hash);
237   }
238 
239   sample->set_span(allocated);
240   sample->set_object(cast_to_oop(obj));
241   sample->set_allocated(allocated);
242   sample->set_allocation_time(JfrTicks::now());
243   sample->set_heap_used_at_last_gc(Universe::heap()->used_at_last_gc());
244   _priority_queue->push(sample);
245 }
246 
247 void ObjectSampler::scavenge() {
248   ObjectSample* current = _list->last();
249   while (current != NULL) {
250     ObjectSample* next = current->next();
251     if (current->is_dead()) {
252       remove_dead(current);

127   assert(SafepointSynchronize::is_at_safepoint(), "invariant");
128   if (_instance != NULL) {
129     ObjectSampler* const sampler = _instance;
130     _instance = NULL;
131     delete sampler;
132   }
133 }
134 
135 static volatile int _lock = 0;
136 
137 ObjectSampler* ObjectSampler::acquire() {
138   while (Atomic::cmpxchg(&_lock, 0, 1) == 1) {}
139   return _instance;
140 }
141 
142 void ObjectSampler::release() {
143   OrderAccess::fence();
144   _lock = 0;
145 }
146 
147 static traceid get_thread_id(JavaThread* thread, bool* virtual_thread) {
148   assert(thread != NULL, "invariant");
149   assert(virtual_thread != NULL, "invariant");
150   if (thread->threadObj() == NULL) {
151     return 0;
152   }
153   const JfrThreadLocal* const tl = thread->jfr_thread_local();
154   assert(tl != NULL, "invariant");
155   if (tl->is_excluded()) {
156     return 0;
157   }
158   return JfrThreadLocal::thread_id(thread, virtual_thread);
159 }
160 
161 static JfrBlobHandle get_thread_blob(JavaThread* thread, traceid tid, bool virtual_thread) {
162   assert(thread != NULL, "invariant");
163   JfrThreadLocal* const tl = thread->jfr_thread_local();
164   assert(tl != NULL, "invariant");
165   assert(!tl->is_excluded(), "invariant");
166   if (virtual_thread) {
167     // TODO: blob cache for virtual threads
168     return JfrCheckpointManager::create_thread_blob(thread, tid, thread->vthread());
169   }
170   if (!tl->has_thread_blob()) {
171     // for regular threads, the blob is cached in the thread local data structure
172     tl->set_thread_blob(JfrCheckpointManager::create_thread_blob(thread, tid));
173     assert(tl->has_thread_blob(), "invariant");
174   }
175   return tl->thread_blob();

176 }
177 
178 class RecordStackTrace {
179  private:
180   JavaThread* _jt;
181   bool _enabled;
182  public:
183   RecordStackTrace(JavaThread* jt) : _jt(jt),
184     _enabled(JfrEventSetting::has_stacktrace(EventOldObjectSample::eventId)) {
185     if (_enabled) {
186       JfrStackTraceRepository::record_for_leak_profiler(jt);
187     }
188   }
189   ~RecordStackTrace() {
190     if (_enabled) {
191       _jt->jfr_thread_local()->clear_cached_stack_trace();
192     }
193   }
194 };
195 
196 void ObjectSampler::sample(HeapWord* obj, size_t allocated, JavaThread* thread) {
197   assert(thread != NULL, "invariant");
198   assert(is_created(), "invariant");
199   bool virtual_thread = false;
200   const traceid thread_id = get_thread_id(thread, &virtual_thread);
201   if (thread_id == 0) {
202     return;
203   }
204   const JfrBlobHandle bh = get_thread_blob(thread, thread_id, virtual_thread);
205   assert(bh.valid(), "invariant");
206   RecordStackTrace rst(thread);
207   // try enter critical section
208   JfrTryLock tryLock(&_lock);
209   if (!tryLock.acquired()) {
210     log_trace(jfr, oldobject, sampling)("Skipping old object sample due to lock contention");
211     return;
212   }
213   instance().add(obj, allocated, thread_id, virtual_thread, bh, thread);
214 }
215 
216 void ObjectSampler::add(HeapWord* obj, size_t allocated, traceid thread_id, bool virtual_thread, const JfrBlobHandle& bh, JavaThread* thread) {
217   assert(obj != NULL, "invariant");
218   assert(thread_id != 0, "invariant");
219   assert(thread != NULL, "invariant");

220 
221   if (Atomic::load(&_dead_samples)) {
222     // There's a small race where a GC scan might reset this to true, potentially
223     // causing a back-to-back scavenge.
224     Atomic::store(&_dead_samples, false);
225     scavenge();
226   }
227 
228   _total_allocated += allocated;
229   const size_t span = _total_allocated - _priority_queue->total();
230   ObjectSample* sample;
231   if ((size_t)_priority_queue->count() == _size) {
232     assert(_list->count() == _size, "invariant");
233     const ObjectSample* peek = _priority_queue->peek();
234     if (peek->span() > span) {
235       // quick reject, will not fit
236       return;
237     }
238     sample = _list->reuse(_priority_queue->pop());
239   } else {
240     sample = _list->get();
241   }
242 
243   assert(sample != NULL, "invariant");
244   sample->set_thread_id(thread_id);
245   if (virtual_thread) {
246     sample->set_thread_is_virtual();
247   }
248   sample->set_thread(bh);
249 
250   const JfrThreadLocal* const tl = thread->jfr_thread_local();
251   const unsigned int stacktrace_hash = tl->cached_stack_trace_hash();
252   if (stacktrace_hash != 0) {
253     sample->set_stack_trace_id(tl->cached_stack_trace_id());
254     sample->set_stack_trace_hash(stacktrace_hash);
255   }
256 
257   sample->set_span(allocated);
258   sample->set_object(cast_to_oop(obj));
259   sample->set_allocated(allocated);
260   sample->set_allocation_time(JfrTicks::now());
261   sample->set_heap_used_at_last_gc(Universe::heap()->used_at_last_gc());
262   _priority_queue->push(sample);
263 }
264 
265 void ObjectSampler::scavenge() {
266   ObjectSample* current = _list->last();
267   while (current != NULL) {
268     ObjectSample* next = current->next();
269     if (current->is_dead()) {
270       remove_dead(current);
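
For context on the locking scheme above: ObjectSampler::acquire() spins on _lock with Atomic::cmpxchg until it owns the sampler, while the allocation-time path in ObjectSampler::sample() uses JfrTryLock, so a contended sampler simply drops the sample (logging "Skipping old object sample due to lock contention") rather than stalling the allocating thread. A minimal standalone sketch of that try-lock pattern follows; it uses std::atomic and hypothetical names (TryLock, sampler_lock, sample_or_skip) purely for illustration and is not the HotSpot JfrTryLock implementation.

#include <atomic>

// Illustrative RAII try-lock over a spin word (sketch only, not JDK code).
class TryLock {
  std::atomic<int>* _lock;
  bool _acquired;
 public:
  explicit TryLock(std::atomic<int>* lock) : _lock(lock) {
    int expected = 0;
    // One CAS attempt: either we enter the critical section or we give up.
    _acquired = _lock->compare_exchange_strong(expected, 1, std::memory_order_acquire);
  }
  ~TryLock() {
    if (_acquired) {
      _lock->store(0, std::memory_order_release);  // leave the critical section
    }
  }
  bool acquired() const { return _acquired; }
};

static std::atomic<int> sampler_lock{0};

void sample_or_skip() {
  TryLock lock(&sampler_lock);
  if (!lock.acquired()) {
    return;  // contended: drop this sample rather than block the allocator
  }
  // ... record the sample while holding the lock ...
}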