< prev index next >

src/hotspot/share/jfr/recorder/stacktrace/jfrStackTraceRepository.cpp

Print this page

// Empties 'repo': deletes every JfrStackTrace node chained in the hash table,
// zeroes the table and entry counters, and returns the number of entries
// removed. Runs under JfrStacktrace_lock with no safepoint check.
129 size_t JfrStackTraceRepository::clear(JfrStackTraceRepository& repo) {
130   MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
131   if (repo._entries == 0) {
132     return 0;  // fast path: repository already empty
133   }
134   for (u4 i = 0; i < TABLE_SIZE; ++i) {
135     JfrStackTrace* stacktrace = repo._table[i];
      // Free every node in this bucket's singly-linked collision chain.
136     while (stacktrace != NULL) {
137       JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
138       delete stacktrace;
139       stacktrace = next;
140     }
141   }
142   memset(repo._table, 0, sizeof(repo._table));
143   const size_t processed = repo._entries;
144   repo._entries = 0;
145   repo._last_entries = 0;
146   return processed;
147 }
148 
// Records the calling thread's stack trace into the global repository and
// returns its trace id. Returns a previously cached id when one exists, and
// 0 when the thread is not recordable (non-Java, hidden, excluded, or its
// frame buffer could not be allocated).
149 traceid JfrStackTraceRepository::record(Thread* thread, int skip /* 0 */) {
150   assert(thread == Thread::current(), "invariant");
151   JfrThreadLocal* const tl = thread->jfr_thread_local();
152   assert(tl != NULL, "invariant");
153   if (tl->has_cached_stack_trace()) {
154     return tl->cached_stack_trace_id();  // reuse the already captured trace
155   }
    // Only visible, non-excluded Java threads have their stacks sampled.
156   if (!thread->is_Java_thread() || thread->is_hidden_from_external_view() || tl->is_excluded()) {
157     return 0;
158   }
159   JfrStackFrame* frames = tl->stackframes();
160   if (frames == NULL) {
161     // pending oom
162     return 0;
163   }
164   assert(frames != NULL, "invariant");
165   assert(tl->stackframes() == frames, "invariant");
166   return instance().record_for(JavaThread::cast(thread), skip, frames, tl->stackdepth());
167 }
168 
// Walks 'thread's stack (skipping the first 'skip' frames, capturing at most
// 'max_frames') into a stack-local JfrStackTrace backed by 'frames', then
// interns it in the global repository. Returns 0 if the stack walk fails.
169 traceid JfrStackTraceRepository::record_for(JavaThread* thread, int skip, JfrStackFrame *frames, u4 max_frames) {
170   JfrStackTrace stacktrace(frames, max_frames);
171   return stacktrace.record_safe(thread, skip) ? add(instance(), stacktrace) : 0;
172 }
// Interns 'stacktrace' in 'repo'. add_trace() returns 0 when the trace does
// not yet have resolved line numbers (see have_lineno() in add_trace), so on
// a first failure we resolve them and retry exactly once; the second attempt
// must succeed.
173 traceid JfrStackTraceRepository::add(JfrStackTraceRepository& repo, const JfrStackTrace& stacktrace) {
174   traceid tid = repo.add_trace(stacktrace);
175   if (tid == 0) {
176     stacktrace.resolve_linenos();  // fill in line numbers, then retry
177     tid = repo.add_trace(stacktrace);
178   }
179   assert(tid != 0, "invariant");
180   return tid;
181 }
182 
// Convenience overload: intern 'stacktrace' in the default (global) instance.
183 traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
184   return add(instance(), stacktrace);
185 }
186 
// Captures 'thread's stack trace into the leak profiler's dedicated
// repository and caches the resulting trace id (keyed by the trace hash) in
// the thread's JFR thread-local. A zero hash indicates no usable trace was
// captured, in which case nothing is cached.
// NOTE(review): unlike record(), this version does not assert that 'thread'
// is the current thread before walking its stack — confirm callers guarantee
// it, since record_safe() is presumably only safe on the current thread.
187 void JfrStackTraceRepository::record_for_leak_profiler(JavaThread* thread, int skip /* 0 */) {
188   assert(thread != NULL, "invariant");
189   JfrThreadLocal* const tl = thread->jfr_thread_local();

190   assert(tl != NULL, "invariant");
191   assert(!tl->has_cached_stack_trace(), "invariant");
192   JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
193   stacktrace.record_safe(thread, skip);
194   const unsigned int hash = stacktrace.hash();
195   if (hash != 0) {
196     tl->set_cached_stack_trace_id(add(leak_profiler_instance(), stacktrace), hash);
197   }
198 }
199 
// Interns 'stacktrace' in the hash table under JfrStacktrace_lock. On a hit,
// returns the existing entry's id. On a miss, returns 0 if line numbers are
// not yet resolved (caller resolves and retries — see add()); otherwise
// heap-copies the trace, links it at the head of its bucket chain under a
// freshly assigned id, and returns that id.
200 traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
201   MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);

202   const size_t index = stacktrace._hash % TABLE_SIZE;
203   const JfrStackTrace* table_entry = _table[index];
204 
    // Linear scan of the bucket's collision chain for an equal trace.
205   while (table_entry != NULL) {
206     if (table_entry->equals(stacktrace)) {
207       return table_entry->id();
208     }
209     table_entry = table_entry->next();
210   }
211 
    // Only traces with resolved line numbers are stored.
212   if (!stacktrace.have_lineno()) {
213     return 0;
214   }
215 
216   traceid id = ++_next_id;
217   _table[index] = new JfrStackTrace(id, stacktrace, _table[index]);
218   ++_entries;
219   return id;
220 }
221 

// Empties 'repo': deletes every JfrStackTrace node chained in the hash table,
// zeroes the table and entry counters, and returns the number of entries
// removed. Runs under JfrStacktrace_lock with no safepoint check.
129 size_t JfrStackTraceRepository::clear(JfrStackTraceRepository& repo) {
130   MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
131   if (repo._entries == 0) {
132     return 0;  // fast path: repository already empty
133   }
134   for (u4 i = 0; i < TABLE_SIZE; ++i) {
135     JfrStackTrace* stacktrace = repo._table[i];
      // Free every node in this bucket's singly-linked collision chain.
136     while (stacktrace != NULL) {
137       JfrStackTrace* next = const_cast<JfrStackTrace*>(stacktrace->next());
138       delete stacktrace;
139       stacktrace = next;
140     }
141   }
142   memset(repo._table, 0, sizeof(repo._table));
143   const size_t processed = repo._entries;
144   repo._entries = 0;
145   repo._last_entries = 0;
146   return processed;
147 }
148 
// Records the calling thread's stack trace into the global repository and
// returns its trace id. Returns a previously cached id when one exists, and
// 0 when the thread is not recordable (non-Java, hidden, excluded, or its
// frame buffer could not be allocated).
149 traceid JfrStackTraceRepository::record(Thread* current_thread, int skip /* 0 */) {
150   assert(current_thread == Thread::current(), "invariant");
151   JfrThreadLocal* const tl = current_thread->jfr_thread_local();
152   assert(tl != NULL, "invariant");
153   if (tl->has_cached_stack_trace()) {
154     return tl->cached_stack_trace_id();  // reuse the already captured trace
155   }
    // Only visible, non-excluded Java threads have their stacks sampled.
156   if (!current_thread->is_Java_thread() || current_thread->is_hidden_from_external_view() || tl->is_excluded()) {
157     return 0;
158   }
159   JfrStackFrame* frames = tl->stackframes();
160   if (frames == NULL) {
161     // pending oom
162     return 0;
163   }
164   assert(frames != NULL, "invariant");
165   assert(tl->stackframes() == frames, "invariant");
166   return instance().record(JavaThread::cast(current_thread), skip, frames, tl->stackdepth());
167 }
168 
// Walks 'current_thread's stack (skipping the first 'skip' frames, capturing
// at most 'max_frames') into a stack-local JfrStackTrace backed by 'frames',
// then interns it in the global repository. Returns 0 if the walk fails.
169 traceid JfrStackTraceRepository::record(JavaThread* current_thread, int skip, JfrStackFrame *frames, u4 max_frames) {
170   JfrStackTrace stacktrace(frames, max_frames);
171   return stacktrace.record(current_thread, skip) ? add(instance(), stacktrace) : 0;
172 }
// Interns 'stacktrace' in 'repo'. add_trace() returns 0 when the trace does
// not yet have resolved line numbers (see have_lineno() in add_trace), so on
// a first failure we resolve them and retry exactly once; the second attempt
// must succeed.
173 traceid JfrStackTraceRepository::add(JfrStackTraceRepository& repo, const JfrStackTrace& stacktrace) {
174   traceid tid = repo.add_trace(stacktrace);
175   if (tid == 0) {
176     stacktrace.resolve_linenos();  // fill in line numbers, then retry
177     tid = repo.add_trace(stacktrace);
178   }
179   assert(tid != 0, "invariant");
180   return tid;
181 }
182 
// Convenience overload: intern 'stacktrace' in the default (global) instance.
183 traceid JfrStackTraceRepository::add(const JfrStackTrace& stacktrace) {
184   return add(instance(), stacktrace);
185 }
186 
// Captures the current thread's stack trace into the leak profiler's
// dedicated repository and caches the resulting trace id (keyed by the trace
// hash) in the thread's JFR thread-local. A zero hash indicates no usable
// trace was captured, in which case nothing is cached. The new assert makes
// the current-thread requirement explicit.
187 void JfrStackTraceRepository::record_for_leak_profiler(JavaThread* current_thread, int skip /* 0 */) {
188   assert(current_thread != NULL, "invariant");
189   assert(current_thread == Thread::current(), "invariant");
190   JfrThreadLocal* const tl = current_thread->jfr_thread_local();
191   assert(tl != NULL, "invariant");
192   assert(!tl->has_cached_stack_trace(), "invariant");
193   JfrStackTrace stacktrace(tl->stackframes(), tl->stackdepth());
194   stacktrace.record(current_thread, skip);
195   const unsigned int hash = stacktrace.hash();
196   if (hash != 0) {
197     tl->set_cached_stack_trace_id(add(leak_profiler_instance(), stacktrace), hash);
198   }
199 }
200 
// Interns 'stacktrace' in the hash table under JfrStacktrace_lock. On a hit,
// returns the existing entry's id. On a miss, returns 0 if line numbers are
// not yet resolved (caller resolves and retries — see add()); otherwise
// heap-copies the trace, links it at the head of its bucket chain under a
// freshly assigned id, and returns that id. Empty traces are asserted against.
201 traceid JfrStackTraceRepository::add_trace(const JfrStackTrace& stacktrace) {
202   MutexLocker lock(JfrStacktrace_lock, Mutex::_no_safepoint_check_flag);
203   assert(stacktrace._nr_of_frames > 0, "invariant");
204   const size_t index = stacktrace._hash % TABLE_SIZE;
205   const JfrStackTrace* table_entry = _table[index];
206 
    // Linear scan of the bucket's collision chain for an equal trace.
207   while (table_entry != NULL) {
208     if (table_entry->equals(stacktrace)) {
209       return table_entry->id();
210     }
211     table_entry = table_entry->next();
212   }
213 
    // Only traces with resolved line numbers are stored.
214   if (!stacktrace.have_lineno()) {
215     return 0;
216   }
217 
218   traceid id = ++_next_id;
219   _table[index] = new JfrStackTrace(id, stacktrace, _table[index]);
220   ++_entries;
221   return id;
222 }
223 
< prev index next >