
src/hotspot/share/oops/instanceStackChunkKlass.cpp

// ... (earlier lines of OopIterateStackChunkFrameClosure elided)
    StackChunkOopIterateFilterClosure<OopIterateClosure> cl(_closure, _bound);
    f.iterate_oops(&cl, map);

    return true;
  }
};

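// The closure above narrows oop iteration through
// StackChunkOopIterateFilterClosure before delegating to the user closure.
// A minimal sketch of that filtering idea (hypothetical and simplified, not
// HotSpot's actual implementation): only oops whose slot address lies inside
// the bounding MemRegion reach the wrapped closure.
template <typename ClosureType>
class FilterByRegionClosureSketch : public OopClosure {
  ClosureType* _inner;
  MemRegion    _bound;

public:
  FilterByRegionClosureSketch(ClosureType* inner, MemRegion bound)
    : _inner(inner), _bound(bound) {}

  virtual void do_oop(oop* p)       { if (_bound.contains(p)) { _inner->do_oop(p); } }
  virtual void do_oop(narrowOop* p) { if (_bound.contains(p)) { _inner->do_oop(p); } }
};
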
void InstanceStackChunkKlass::oop_oop_iterate_stack_slow(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr) {
  if (UseZGC || UseShenandoahGC) {
    // An OopClosure could apply barriers to a stack chunk. The side effects
    // of the load barriers could destroy derived pointers, which must be
    // processed before their base oop is processed. So we force processing
    // of derived pointers before applying the closures.
    chunk->relativize_derived_pointers_concurrently();
  }
  OopIterateStackChunkFrameClosure frame_closure(closure, mr);
  chunk->iterate_stack(&frame_closure);
}

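// Relativizing rewrites a derived pointer as an offset from its base oop, so
// the GC can relocate the base and the absolute pointer can be rebuilt later.
// A standalone sketch of the idea (hypothetical helper names, not HotSpot's
// actual slot representation):
inline intptr_t relativize_sketch(intptr_t derived, intptr_t base) {
  return derived - base;      // the offset survives the base oop being moved
}

inline intptr_t derelativize_sketch(intptr_t offset, intptr_t new_base) {
  return new_base + offset;   // rebuild once the new base address is known
}
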
template <typename OopT>
void InstanceStackChunkKlass::oop_oop_iterate_lockstack(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr) {
  StackChunkOopIterateFilterClosure<OopIterateClosure> cl(closure, mr);
  if (chunk->has_bitmap()) {
    chunk->iterate_lockstack<OopT>(&cl);
  } else {
    chunk->iterate_lockstack<oop>(&cl);
  }
}

template void InstanceStackChunkKlass::oop_oop_iterate_lockstack<oop>(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr);
template void InstanceStackChunkKlass::oop_oop_iterate_lockstack<narrowOop>(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr);

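// Both explicit instantiations above are needed because a chunk with a bitmap
// may store narrow (compressed) oops, while a chunk without one always holds
// full-width oops. A hypothetical caller-side dispatch, assuming the method is
// statically callable, shown only to illustrate how OopT would be chosen:
static void iterate_lockstack_dispatch_sketch(stackChunkOop chunk, OopIterateClosure* cl, MemRegion mr) {
  if (UseCompressedOops) {
    InstanceStackChunkKlass::oop_oop_iterate_lockstack<narrowOop>(chunk, cl, mr);
  } else {
    InstanceStackChunkKlass::oop_oop_iterate_lockstack<oop>(chunk, cl, mr);
  }
}
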
#ifdef ASSERT

class DescribeStackChunkClosure {
  stackChunkOop _chunk;
  FrameValues _values;
  RegisterMap _map;
  int _frame_no;

public:
  DescribeStackChunkClosure(stackChunkOop chunk)
    : _chunk(chunk),
      _map(nullptr,
           RegisterMap::UpdateMap::include,
           RegisterMap::ProcessFrames::skip,
           RegisterMap::WalkContinuation::include),
      _frame_no(0) {
    _map.set_include_argument_oops(false);
  }

  const RegisterMap* get_map(const RegisterMap* map, intptr_t* sp) { return map; }

  // ... (unchanged lines elided)

      describe_chunk();
      _values.print_on(_chunk, out);
    } else {
      out->print_cr(" EMPTY");
    }
  }
};
#endif

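// Hypothetical debug-build usage, assuming the elided part of the class feeds
// each frame into _values and declares the print_on(outputStream*) whose tail
// is shown above:
#ifdef ASSERT
static void describe_chunk_sketch(stackChunkOop chunk, outputStream* st) {
  DescribeStackChunkClosure closure(chunk);
  chunk->iterate_stack(&closure);  // populates FrameValues frame by frame
  closure.print_on(st);            // prints the frames, or " EMPTY"
}
#endif
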
class PrintStackChunkClosure {
  outputStream* _st;

public:
  PrintStackChunkClosure(outputStream* st) : _st(st) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& fs, const RegisterMapT* map) {
    frame f = fs.to_frame();
    _st->print_cr("-- frame sp: " PTR_FORMAT " interpreted: %d size: %d argsize: %d",
                  p2i(fs.sp()), fs.is_interpreted(), f.frame_size(),
                  fs.is_interpreted() || fs.is_stub() ? 0 : f.compiled_frame_stack_argsize());
  #ifdef ASSERT
    f.print_value_on(_st, nullptr);
  #else
    f.print_on(_st);
  #endif
    const ImmutableOopMap* oopmap = fs.oopmap();
    if (oopmap != nullptr) {
      oopmap->print_on(_st);
      _st->cr();
    }
    return true;
  }
};

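// A sketch of how the verbose path of print_chunk (below, body elided) could
// drive the closure above; the helper name is hypothetical:
static void print_chunk_verbose_sketch(stackChunkOop c, outputStream* st) {
  PrintStackChunkClosure closure(st);
  c->iterate_stack(&closure);  // do_frame runs once per frame in the chunk
}
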
void InstanceStackChunkKlass::print_chunk(const stackChunkOop c, bool verbose, outputStream* st) {
  if (c == nullptr) {
    st->print_cr("CHUNK null");
    return;
  }
  // ... (remainder of print_chunk elided)