146
147 StackChunkOopIterateFilterClosure<OopIterateClosure> cl(_closure, _bound);
148 f.iterate_oops(&cl, map);
149
150 return true;
151 }
152 };
153
154 void InstanceStackChunkKlass::oop_oop_iterate_stack_slow(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr) {
155 if (UseZGC || UseShenandoahGC) {
156 // An OopClosure could apply barriers to a stack chunk. The side effects
157 // of the load barriers could destroy derived pointers, which must be
158 // processed before their base oop is processed. So we force processing
159 // of derived pointers before applying the closures.
160 chunk->relativize_derived_pointers_concurrently();
161 }
162 OopIterateStackChunkFrameClosure frame_closure(closure, mr);
163 chunk->iterate_stack(&frame_closure);
164 }
165
166 #ifdef ASSERT
167
168 class DescribeStackChunkClosure {
169 stackChunkOop _chunk;
170 FrameValues _values;
171 RegisterMap _map;
172 int _frame_no;
173
174 public:
175 DescribeStackChunkClosure(stackChunkOop chunk)
176 : _chunk(chunk),
177 _map(nullptr,
178 RegisterMap::UpdateMap::include,
179 RegisterMap::ProcessFrames::skip,
180 RegisterMap::WalkContinuation::include),
181 _frame_no(0) {
182 _map.set_include_argument_oops(false);
183 }
184
185 const RegisterMap* get_map(const RegisterMap* map, intptr_t* sp) { return map; }
207 describe_chunk();
208 _values.print_on(_chunk, out);
209 } else {
210 out->print_cr(" EMPTY");
211 }
212 }
213 };
214 #endif
215
216 class PrintStackChunkClosure {
217 outputStream* _st;
218
219 public:
220 PrintStackChunkClosure(outputStream* st) : _st(st) {}
221
222 template <ChunkFrames frame_kind, typename RegisterMapT>
223 bool do_frame(const StackChunkFrameStream<frame_kind>& fs, const RegisterMapT* map) {
224 frame f = fs.to_frame();
225 _st->print_cr("-- frame sp: " PTR_FORMAT " interpreted: %d size: %d argsize: %d",
226 p2i(fs.sp()), fs.is_interpreted(), f.frame_size(),
227 fs.is_interpreted() ? 0 : f.compiled_frame_stack_argsize());
228 #ifdef ASSERT
229 f.print_value_on(_st);
230 #else
231 f.print_on(_st);
232 #endif
233 const ImmutableOopMap* oopmap = fs.oopmap();
234 if (oopmap != nullptr) {
235 oopmap->print_on(_st);
236 _st->cr();
237 }
238 return true;
239 }
240 };
241
242 void InstanceStackChunkKlass::print_chunk(const stackChunkOop c, bool verbose, outputStream* st) {
243 if (c == nullptr) {
244 st->print_cr("CHUNK null");
245 return;
246 }
247
|
146
147 StackChunkOopIterateFilterClosure<OopIterateClosure> cl(_closure, _bound);
148 f.iterate_oops(&cl, map);
149
150 return true;
151 }
152 };
153
// Slow-path oop iteration over a stack chunk: applies 'closure' to the oops
// of each frame in 'chunk', bounded by memory region 'mr'.
154 void InstanceStackChunkKlass::oop_oop_iterate_stack_slow(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr) {
155 if (UseZGC || UseShenandoahGC) {
156 // An OopClosure could apply barriers to a stack chunk. The side effects
157 // of the load barriers could destroy derived pointers, which must be
158 // processed before their base oop is processed. So we force processing
159 // of derived pointers before applying the closures.
160 chunk->relativize_derived_pointers_concurrently();
161 }
// Walk every frame of the chunk; the frame closure applies 'closure' to the
// frame's oops (presumably filtered against 'mr' — see the filter closure above).
162 OopIterateStackChunkFrameClosure frame_closure(closure, mr);
163 chunk->iterate_stack(&frame_closure);
164 }
165
166 template <typename OopT>
167 void InstanceStackChunkKlass::oop_oop_iterate_lockstack(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr) {
168 if (LockingMode != LM_LIGHTWEIGHT) {
169 return;
170 }
171
172 StackChunkOopIterateFilterClosure<OopIterateClosure> cl(closure, mr);
173 if (chunk->has_bitmap()) {
174 chunk->iterate_lockstack<OopT>(&cl);
175 } else {
176 chunk->iterate_lockstack<oop>(&cl);
177 }
178 }
179
180 template void InstanceStackChunkKlass::oop_oop_iterate_lockstack<oop>(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr);
181 template void InstanceStackChunkKlass::oop_oop_iterate_lockstack<narrowOop>(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr);
182
183 #ifdef ASSERT
184
185 class DescribeStackChunkClosure {
186 stackChunkOop _chunk;
187 FrameValues _values;
188 RegisterMap _map;
189 int _frame_no;
190
191 public:
192 DescribeStackChunkClosure(stackChunkOop chunk)
193 : _chunk(chunk),
194 _map(nullptr,
195 RegisterMap::UpdateMap::include,
196 RegisterMap::ProcessFrames::skip,
197 RegisterMap::WalkContinuation::include),
198 _frame_no(0) {
199 _map.set_include_argument_oops(false);
200 }
201
202 const RegisterMap* get_map(const RegisterMap* map, intptr_t* sp) { return map; }
224 describe_chunk();
225 _values.print_on(_chunk, out);
226 } else {
227 out->print_cr(" EMPTY");
228 }
229 }
230 };
231 #endif
232
233 class PrintStackChunkClosure {
234 outputStream* _st;
235
236 public:
237 PrintStackChunkClosure(outputStream* st) : _st(st) {}
238
239 template <ChunkFrames frame_kind, typename RegisterMapT>
240 bool do_frame(const StackChunkFrameStream<frame_kind>& fs, const RegisterMapT* map) {
241 frame f = fs.to_frame();
242 _st->print_cr("-- frame sp: " PTR_FORMAT " interpreted: %d size: %d argsize: %d",
243 p2i(fs.sp()), fs.is_interpreted(), f.frame_size(),
244 fs.is_interpreted() || fs.is_stub() ? 0 : f.compiled_frame_stack_argsize());
245 #ifdef ASSERT
246 f.print_value_on(_st);
247 #else
248 f.print_on(_st);
249 #endif
250 const ImmutableOopMap* oopmap = fs.oopmap();
251 if (oopmap != nullptr) {
252 oopmap->print_on(_st);
253 _st->cr();
254 }
255 return true;
256 }
257 };
258
259 void InstanceStackChunkKlass::print_chunk(const stackChunkOop c, bool verbose, outputStream* st) {
260 if (c == nullptr) {
261 st->print_cr("CHUNK null");
262 return;
263 }
264
|