src/hotspot/share/oops/stackChunkOop.cpp

 48 private:
 49   const frame& _f;
 50   const RegisterMapT* _map;
 51 
 52 public:
 53   FrameOopIterator(const frame& f, const RegisterMapT* map)
 54     : _f(f),
 55       _map(map) {
 56   }
 57 
 58   virtual void oops_do(OopClosure* cl) override {
 59     if (_f.is_interpreted_frame()) {
 60       _f.oops_interpreted_do(cl, nullptr);
 61     } else {
 62       OopMapDo<OopClosure, DerivedOopClosure, IncludeAllValues> visitor(cl, nullptr);
 63       visitor.oops_do(&_f, _map, _f.oop_map());
 64     }
 65   }
 66 };
 67 
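For orientation, here is a minimal sketch of how an OopIterator such as FrameOopIterator above is driven: a caller supplies an OopClosure and the iterator visits every oop slot of the wrapped frame. The counting closure and helper below are illustrative only and are not part of this file.

// Illustrative only: count the oop slots of a frame via FrameOopIterator.
class CountOopsClosure : public OopClosure {
public:
  int _count = 0;
  virtual void do_oop(oop* p)       override { _count++; }
  virtual void do_oop(narrowOop* p) override { _count++; }
};

template <typename RegisterMapT>
static int count_frame_oops(const frame& f, const RegisterMapT* map) {
  CountOopsClosure cl;
  FrameOopIterator<RegisterMapT> iterator(f, map);
  iterator.oops_do(&cl);  // interpreted frames go through oops_interpreted_do, compiled frames through OopMapDo
  return cl._count;
}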
 68 frame stackChunkOopDesc::top_frame(RegisterMap* map) {
 69   assert(!is_empty(), "");
 70   StackChunkFrameStream<ChunkFrames::Mixed> fs(this);
 71 
 72   map->set_stack_chunk(this);
 73   fs.initialize_register_map(map);
 74 
 75   frame f = fs.to_frame();
 76 
 77   assert(to_offset(f.sp()) == sp(), "f.offset_sp(): %d sp(): %d async: %d", f.offset_sp(), sp(), map->is_async());
 78   relativize_frame(f);
 79   f.set_frame_index(0);
 80   return f;
 81 }
 82 
 83 frame stackChunkOopDesc::sender(const frame& f, RegisterMap* map) {
 84   assert(map->in_cont(), "");
 85   assert(!map->include_argument_oops(), "");
 86   assert(!f.is_empty(), "");
 87   assert(map->stack_chunk() == this, "");

207   stackChunkOop _chunk;
208   DerivedPointerClosureType* _cl;
209 
210 public:
211   EncodeGCModeConcurrentFrameClosure(stackChunkOop chunk, DerivedPointerClosureType* cl)
212     : _chunk(chunk),
213       _cl(cl) {
214   }
215 
216   template <ChunkFrames frame_kind, typename RegisterMapT>
217   bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
218     f.iterate_derived_pointers(_cl, map);
219 
220     BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
221     frame fr = f.to_frame();
222     FrameOopIterator<RegisterMapT> iterator(fr, map);
223     bs_chunk->encode_gc_mode(_chunk, &iterator);
224 
225     return true;
226   }
227 };
228 
229 bool stackChunkOopDesc::try_acquire_relativization() {
230   for (;;) {
231     // We use an acquiring load when reading the flags to ensure that if we leave this
232     // function thinking that relativization is finished, we know that if another thread
233     // did the relativization, we will still be able to observe the relativized derived
234     // pointers, which is important as subsequent modifications of derived pointers must
235     // happen after relativization.
236     uint8_t flags_before = flags_acquire();
237     if ((flags_before & FLAG_GC_MODE) != 0) {
238       // Terminal state - relativization is ensured
239       return false;
240     }
241 
242     if ((flags_before & FLAG_CLAIM_RELATIVIZE) != 0) {
243       // Someone else has claimed relativization - wait for completion
244       MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
245       uint8_t flags_under_lock = flags_acquire();
246       if ((flags_under_lock & FLAG_GC_MODE) != 0) {

281       ml.notify_all();
282       return;
283     }
284 
285     if (try_set_flags(flags_before, flags_before | FLAG_GC_MODE)) {
286       // Successfully set the terminal state; we are done
287       return;
288     }
289   }
290 }
291 
292 void stackChunkOopDesc::relativize_derived_pointers_concurrently() {
293   if (!try_acquire_relativization()) {
294     // Already relativized
295     return;
296   }
297 
298   DerivedPointersSupport::RelativizeClosure derived_cl;
299   EncodeGCModeConcurrentFrameClosure<decltype(derived_cl)> frame_cl(this, &derived_cl);
300   iterate_stack(&frame_cl);
301 
302   release_relativization();
303 }
304 
305 class TransformStackChunkClosure {
306   stackChunkOop _chunk;
307 
308 public:
309   TransformStackChunkClosure(stackChunkOop chunk) : _chunk(chunk) { }
310 
311   template <ChunkFrames frame_kind, typename RegisterMapT>
312   bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
313     DerivedPointersSupport::RelativizeClosure derived_cl;
314     f.iterate_derived_pointers(&derived_cl, map);
315 
316     BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
317     frame fr = f.to_frame();
318     FrameOopIterator<RegisterMapT> iterator(fr, map);
319     bs_chunk->encode_gc_mode(_chunk, &iterator);
320 
321     return true;
322   }
323 };
324 
325 void stackChunkOopDesc::transform() {
326   assert(!is_gc_mode(), "Should only be called once per chunk");
327   set_gc_mode(true);
328 
329   assert(!has_bitmap(), "Should only be set once");
330   set_has_bitmap(true);
331   bitmap().clear();
332 
333   TransformStackChunkClosure closure(this);
334   iterate_stack(&closure);
335 }
336 
337 template <stackChunkOopDesc::BarrierType barrier, bool compressedOopsWithBitmap>
338 class BarrierClosure: public OopClosure {
339   NOT_PRODUCT(intptr_t* _sp;)
340 
341 public:
342   BarrierClosure(intptr_t* sp) NOT_PRODUCT(: _sp(sp)) {}
343 
344   virtual void do_oop(oop* p)       override { compressedOopsWithBitmap ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
345   virtual void do_oop(narrowOop* p) override { do_oop_work(p); }
346 
347   template <class T> inline void do_oop_work(T* p) {
348     oop value = (oop)HeapAccess<>::oop_load(p);
349     if (barrier == stackChunkOopDesc::BarrierType::Store) {
350       HeapAccess<>::oop_store(p, value);
351     }
352   }
353 };
354 

391 template <typename RegisterMapT>
392 void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMapT* map) {
393   if (!(is_gc_mode() || requires_barriers())) {
394     return;
395   }
396 
397   BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
398   FrameOopIterator<RegisterMapT> iterator(f, map);
399   bs_chunk->decode_gc_mode(this, &iterator);
400 
401   if (f.is_compiled_frame() && f.oop_map()->has_derived_oops()) {
402     DerivedPointersSupport::DerelativizeClosure derived_closure;
403     OopMapDo<OopClosure, DerivedPointersSupport::DerelativizeClosure, SkipNullValue> visitor(nullptr, &derived_closure);
404     visitor.oops_do(&f, map, f.oop_map());
405   }
406 }
407 
408 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMap* map);
409 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMap* map);
410 
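As a usage note, fix_thawed_frame is meant to be applied to a frame right after it has been copied out of the chunk; it is a no-op unless the chunk is in GC mode or requires barriers. The call site sketched below is hypothetical (the real callers live in the continuation thaw code):

// Hypothetical call site: heal the oops of a freshly thawed frame f.
static void example_after_thaw(stackChunkOop chunk, const frame& f, const RegisterMap* map) {
  chunk->fix_thawed_frame(f, map);  // decodes oops and de-relativizes derived pointers as needed
}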
411 void stackChunkOopDesc::print_on(bool verbose, outputStream* st) const {
412   if (*((juint*)this) == badHeapWordVal) {
413     st->print_cr("BAD WORD");
414   } else if (*((juint*)this) == badMetaWordVal) {
415     st->print_cr("BAD META WORD");
416   } else {
417     InstanceStackChunkKlass::print_chunk(const_cast<stackChunkOopDesc*>(this), verbose, st);
418   }
419 }
420 
421 #ifdef ASSERT
422 
423 class StackChunkVerifyOopsClosure : public OopClosure {
424   stackChunkOop _chunk;
425   int _count;
426 
427 public:
428   StackChunkVerifyOopsClosure(stackChunkOop chunk)
429     : _chunk(chunk), _count(0) {}
430 

441     }
442   }
443 
444   int count() const { return _count; }
445 };
446 
447 class VerifyStackChunkFrameClosure {
448   stackChunkOop _chunk;
449 
450 public:
451   intptr_t* _sp;
452   CodeBlob* _cb;
453   bool _callee_interpreted;
454   int _size;
455   int _argsize;
456   int _num_oops;
457   int _num_frames;
458   int _num_interpreted_frames;
459   int _num_i2c;
460 
461   VerifyStackChunkFrameClosure(stackChunkOop chunk, int num_frames, int size)
462     : _chunk(chunk), _sp(nullptr), _cb(nullptr), _callee_interpreted(false),
463       _size(size), _argsize(0), _num_oops(0), _num_frames(num_frames), _num_interpreted_frames(0), _num_i2c(0) {}
464 
465   template <ChunkFrames frame_kind, typename RegisterMapT>
466   bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
467     _sp = f.sp();
468     _cb = f.cb();
469 
470     int fsize = f.frame_size() - ((f.is_interpreted() == _callee_interpreted) ? _argsize : 0);
471     int num_oops = f.num_oops();
472     assert(num_oops >= 0, "");
473 
474     _argsize   = f.stack_argsize() + frame::metadata_words_at_top;
475     _size     += fsize;
476     _num_oops += num_oops;
477     if (f.is_interpreted()) {
478       _num_interpreted_frames++;
479     }
480 
481     log_develop_trace(continuations)("debug_verify_stack_chunk frame: %d sp: " INTPTR_FORMAT " pc: " PTR_FORMAT " interpreted: %d size: %d argsize: %d oops: %d", _num_frames, f.sp() - _chunk->start_address(), p2i(f.pc()), f.is_interpreted(), fsize, _argsize, num_oops);
482     LogTarget(Trace, continuations) lt;
483     if (lt.develop_is_enabled()) {

537   assert(stack_size() >= 0, "");
538   assert(argsize() >= 0, "");
539   assert(!has_bitmap() || is_gc_mode(), "");
540 
541   if (is_empty()) {
542     assert(argsize() == 0, "");
543     assert(max_thawing_size() == 0, "");
544   }
545 
546   assert(oopDesc::is_oop_or_null(parent()), "");
547 
548   const bool concurrent = !Thread::current()->is_Java_thread();
549 
550   // If argsize == 0 and the chunk isn't mixed, the chunk contains the metadata (pc, fp -- frame::sender_sp_offset)
551   // for the top frame (below sp), and *not* for the bottom frame.
552   int size = stack_size() - argsize() - sp();
553   assert(size >= 0, "");
554   assert((size == 0) == is_empty(), "");
555 
556   const StackChunkFrameStream<ChunkFrames::Mixed> first(this);
557   const bool has_safepoint_stub_frame = first.is_stub();
558 
559   VerifyStackChunkFrameClosure closure(this,
560                                        has_safepoint_stub_frame ? 1 : 0, // Iterate_stack skips the safepoint stub
561                                        has_safepoint_stub_frame ? first.frame_size() : 0);
562   iterate_stack(&closure);
563 
564   assert(!is_empty() || closure._cb == nullptr, "");
565   if (closure._cb != nullptr && closure._cb->is_compiled()) {
566     assert(argsize() ==
 567       (closure._cb->as_compiled_method()->method()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >> LogBytesPerWord,
 568       "chunk argsize: %d bottom frame argsize: %d", argsize(),
 569       (closure._cb->as_compiled_method()->method()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >> LogBytesPerWord);
570   }
571 
572   assert(closure._num_interpreted_frames == 0 || has_mixed_frames(), "");
573 
574   if (!concurrent) {
575     assert(closure._size <= size + argsize() + frame::metadata_words,
576            "size: %d argsize: %d closure.size: %d end sp: " PTR_FORMAT " start sp: %d chunk size: %d",
577            size, argsize(), closure._size, closure._sp - start_address(), sp(), stack_size());
578     assert(argsize() == closure._argsize - (closure._num_frames > 0 ? frame::metadata_words_at_top : 0),
579            "argsize(): %d closure.argsize: %d closure.callee_interpreted: %d",
580            argsize(), closure._argsize, closure._callee_interpreted);
581 

 48 private:
 49   const frame& _f;
 50   const RegisterMapT* _map;
 51 
 52 public:
 53   FrameOopIterator(const frame& f, const RegisterMapT* map)
 54     : _f(f),
 55       _map(map) {
 56   }
 57 
 58   virtual void oops_do(OopClosure* cl) override {
 59     if (_f.is_interpreted_frame()) {
 60       _f.oops_interpreted_do(cl, nullptr);
 61     } else {
 62       OopMapDo<OopClosure, DerivedOopClosure, IncludeAllValues> visitor(cl, nullptr);
 63       visitor.oops_do(&_f, _map, _f.oop_map());
 64     }
 65   }
 66 };
 67 
 68 class LockStackOopIterator : public OopIterator {
 69 private:
 70   const stackChunkOop _chunk;
 71 public:
 72   LockStackOopIterator(const stackChunkOop chunk) : _chunk(chunk) {}
 73 
 74   virtual void oops_do(OopClosure* cl) override {
 75     int cnt = _chunk->lockStackSize();
 76     oop* lockstack_start = (oop*)_chunk->start_address();
 77     for (int i = 0; i < cnt; i++) {
 78       cl->do_oop(&lockstack_start[i]);
 79     }
 80   }
 81 };
 82 
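A minimal sketch of driving the new LockStackOopIterator outside of the barrier-set path (illustrative only; in this change the iterator is only handed to BarrierSetStackChunk::encode_gc_mode). It visits the first lockStackSize() oop slots stored at the chunk's start_address():

// Illustrative only: verify the chunk's lock-stack slots, assuming the chunk
// is not in GC mode (i.e. the slots still hold uncompressed oops).
class VerifyLockStackClosure : public OopClosure {
public:
  virtual void do_oop(oop* p)       override { assert(oopDesc::is_oop_or_null(*p), "bad lock-stack oop"); }
  virtual void do_oop(narrowOop* p) override { ShouldNotReachHere(); }  // the iterator only passes oop* slots
};

static void verify_lockstack(stackChunkOop chunk) {
  VerifyLockStackClosure cl;
  LockStackOopIterator iterator(chunk);
  iterator.oops_do(&cl);
}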
 83 frame stackChunkOopDesc::top_frame(RegisterMap* map) {
 84   assert(!is_empty(), "");
 85   StackChunkFrameStream<ChunkFrames::Mixed> fs(this);
 86 
 87   map->set_stack_chunk(this);
 88   fs.initialize_register_map(map);
 89 
 90   frame f = fs.to_frame();
 91 
 92   assert(to_offset(f.sp()) == sp(), "f.offset_sp(): %d sp(): %d async: %d", f.offset_sp(), sp(), map->is_async());
 93   relativize_frame(f);
 94   f.set_frame_index(0);
 95   return f;
 96 }
 97 
 98 frame stackChunkOopDesc::sender(const frame& f, RegisterMap* map) {
 99   assert(map->in_cont(), "");
100   assert(!map->include_argument_oops(), "");
101   assert(!f.is_empty(), "");
102   assert(map->stack_chunk() == this, "");

222   stackChunkOop _chunk;
223   DerivedPointerClosureType* _cl;
224 
225 public:
226   EncodeGCModeConcurrentFrameClosure(stackChunkOop chunk, DerivedPointerClosureType* cl)
227     : _chunk(chunk),
228       _cl(cl) {
229   }
230 
231   template <ChunkFrames frame_kind, typename RegisterMapT>
232   bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
233     f.iterate_derived_pointers(_cl, map);
234 
235     BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
236     frame fr = f.to_frame();
237     FrameOopIterator<RegisterMapT> iterator(fr, map);
238     bs_chunk->encode_gc_mode(_chunk, &iterator);
239 
240     return true;
241   }
242 
243   bool do_lockstack() {
244     BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
245     LockStackOopIterator iterator(_chunk);
246     bs_chunk->encode_gc_mode(_chunk, &iterator);
247 
248     return true;
249   }
250 };
251 
252 bool stackChunkOopDesc::try_acquire_relativization() {
253   for (;;) {
254     // We use an acquiring load when reading the flags to ensure that if we leave this
255     // function thinking that relativization is finished, we know that if another thread
256     // did the relativization, we will still be able to observe the relativized derived
257     // pointers, which is important as subsequent modifications of derived pointers must
258     // happen after relativization.
259     uint8_t flags_before = flags_acquire();
260     if ((flags_before & FLAG_GC_MODE) != 0) {
261       // Terminal state - relativization is ensured
262       return false;
263     }
264 
265     if ((flags_before & FLAG_CLAIM_RELATIVIZE) != 0) {
266       // Someone else has claimed relativization - wait for completion
267       MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
268       uint8_t flags_under_lock = flags_acquire();
269       if ((flags_under_lock & FLAG_GC_MODE) != 0) {

304       ml.notify_all();
305       return;
306     }
307 
308     if (try_set_flags(flags_before, flags_before | FLAG_GC_MODE)) {
309       // Successfully set the terminal state; we are done
310       return;
311     }
312   }
313 }
314 
315 void stackChunkOopDesc::relativize_derived_pointers_concurrently() {
316   if (!try_acquire_relativization()) {
317     // Already relativized
318     return;
319   }
320 
321   DerivedPointersSupport::RelativizeClosure derived_cl;
322   EncodeGCModeConcurrentFrameClosure<decltype(derived_cl)> frame_cl(this, &derived_cl);
323   iterate_stack(&frame_cl);
324   frame_cl.do_lockstack();
325 
326   release_relativization();
327 }
328 
329 class TransformStackChunkClosure {
330   stackChunkOop _chunk;
331 
332 public:
333   TransformStackChunkClosure(stackChunkOop chunk) : _chunk(chunk) { }
334 
335   template <ChunkFrames frame_kind, typename RegisterMapT>
336   bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
337     DerivedPointersSupport::RelativizeClosure derived_cl;
338     f.iterate_derived_pointers(&derived_cl, map);
339 
340     BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
341     frame fr = f.to_frame();
342     FrameOopIterator<RegisterMapT> iterator(fr, map);
343     bs_chunk->encode_gc_mode(_chunk, &iterator);
344 
345     return true;
346   }
347 
348   bool do_lockstack() {
349     BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
350     LockStackOopIterator iterator(_chunk);
351     bs_chunk->encode_gc_mode(_chunk, &iterator);
352 
353     return true;
354   }
355 };
356 
357 void stackChunkOopDesc::transform() {
358   assert(!is_gc_mode(), "Should only be called once per chunk");
359   set_gc_mode(true);
360 
361   assert(!has_bitmap(), "Should only be set once");
362   set_has_bitmap(true);
363   bitmap().clear();
364 
365   TransformStackChunkClosure closure(this);
366   iterate_stack(&closure);
367   closure.do_lockstack();
368 }
369 
370 template <stackChunkOopDesc::BarrierType barrier, bool compressedOopsWithBitmap>
371 class BarrierClosure: public OopClosure {
372   NOT_PRODUCT(intptr_t* _sp;)
373 
374 public:
375   BarrierClosure(intptr_t* sp) NOT_PRODUCT(: _sp(sp)) {}
376 
377   virtual void do_oop(oop* p)       override { compressedOopsWithBitmap ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
378   virtual void do_oop(narrowOop* p) override { do_oop_work(p); }
379 
380   template <class T> inline void do_oop_work(T* p) {
381     oop value = (oop)HeapAccess<>::oop_load(p);
382     if (barrier == stackChunkOopDesc::BarrierType::Store) {
383       HeapAccess<>::oop_store(p, value);
384     }
385   }
386 };
387 

424 template <typename RegisterMapT>
425 void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMapT* map) {
426   if (!(is_gc_mode() || requires_barriers())) {
427     return;
428   }
429 
430   BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
431   FrameOopIterator<RegisterMapT> iterator(f, map);
432   bs_chunk->decode_gc_mode(this, &iterator);
433 
434   if (f.is_compiled_frame() && f.oop_map()->has_derived_oops()) {
435     DerivedPointersSupport::DerelativizeClosure derived_closure;
436     OopMapDo<OopClosure, DerivedPointersSupport::DerelativizeClosure, SkipNullValue> visitor(nullptr, &derived_closure);
437     visitor.oops_do(&f, map, f.oop_map());
438   }
439 }
440 
441 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMap* map);
442 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMap* map);
443 
444 void stackChunkOopDesc::copy_lockstack(oop* dst) {
445   int cnt = lockStackSize();
446 
447   if (!(is_gc_mode() || requires_barriers())) {
448     oop* lockstack_start = (oop*)start_address();
449     for (int i = 0; i < cnt; i++) {
450       dst[i] = lockstack_start[i];
451       assert(oopDesc::is_oop(dst[i]), "not an oop");
452     }
453     return;
454   }
455 
456   if (has_bitmap() && UseCompressedOops) {
457     intptr_t* lockstack_start = start_address();
458     for (int i = 0; i < cnt; i++) {
459       oop mon_owner = HeapAccess<>::oop_load((narrowOop*)&lockstack_start[i]);
460       assert(oopDesc::is_oop(mon_owner), "not an oop");
461       dst[i] = mon_owner;
462     }
463   } else {
464     intptr_t* lockstack_start = start_address();
465     for (int i = 0; i < cnt; i++) {
466       oop mon_owner = HeapAccess<>::oop_load((oop*)&lockstack_start[i]);
467       assert(oopDesc::is_oop(mon_owner), "not an oop");
468       dst[i] = mon_owner;
469     }
470   }
471 }
472 
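A hypothetical caller of the new copy_lockstack (the actual caller is expected on the thaw side, which re-installs these oops on the thread's lock stack); the buffer size and bound below are illustrative assumptions, not values taken from this change:

// Hypothetical: copy the chunk's lock-stack oops into a local buffer.
static void example_copy_lockstack(stackChunkOop chunk) {
  int cnt = chunk->lockStackSize();
  oop buffer[8];                  // assumed upper bound, for illustration only
  assert(cnt <= 8, "illustrative bound");
  chunk->copy_lockstack(buffer);  // uses HeapAccess loads when the chunk is in GC mode or requires barriers
  // buffer[0..cnt) now holds the lock-stack oops (mon_owner in the code above)
}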
473 void stackChunkOopDesc::print_on(bool verbose, outputStream* st) const {
474   if (*((juint*)this) == badHeapWordVal) {
475     st->print_cr("BAD WORD");
476   } else if (*((juint*)this) == badMetaWordVal) {
477     st->print_cr("BAD META WORD");
478   } else {
479     InstanceStackChunkKlass::print_chunk(const_cast<stackChunkOopDesc*>(this), verbose, st);
480   }
481 }
482 
483 #ifdef ASSERT
484 
485 class StackChunkVerifyOopsClosure : public OopClosure {
486   stackChunkOop _chunk;
487   int _count;
488 
489 public:
490   StackChunkVerifyOopsClosure(stackChunkOop chunk)
491     : _chunk(chunk), _count(0) {}
492 

503     }
504   }
505 
506   int count() const { return _count; }
507 };
508 
509 class VerifyStackChunkFrameClosure {
510   stackChunkOop _chunk;
511 
512 public:
513   intptr_t* _sp;
514   CodeBlob* _cb;
515   bool _callee_interpreted;
516   int _size;
517   int _argsize;
518   int _num_oops;
519   int _num_frames;
520   int _num_interpreted_frames;
521   int _num_i2c;
522 
523   VerifyStackChunkFrameClosure(stackChunkOop chunk)
524     : _chunk(chunk), _sp(nullptr), _cb(nullptr), _callee_interpreted(false),
525       _size(0), _argsize(0), _num_oops(0), _num_frames(0), _num_interpreted_frames(0), _num_i2c(0) {}
526 
527   template <ChunkFrames frame_kind, typename RegisterMapT>
528   bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
529     _sp = f.sp();
530     _cb = f.cb();
531 
532     int fsize = f.frame_size() - ((f.is_interpreted() == _callee_interpreted) ? _argsize : 0);
533     int num_oops = f.num_oops();
534     assert(num_oops >= 0, "");
535 
536     _argsize   = f.stack_argsize() + frame::metadata_words_at_top;
537     _size     += fsize;
538     _num_oops += num_oops;
539     if (f.is_interpreted()) {
540       _num_interpreted_frames++;
541     }
542 
543     log_develop_trace(continuations)("debug_verify_stack_chunk frame: %d sp: " INTPTR_FORMAT " pc: " PTR_FORMAT " interpreted: %d size: %d argsize: %d oops: %d", _num_frames, f.sp() - _chunk->start_address(), p2i(f.pc()), f.is_interpreted(), fsize, _argsize, num_oops);
544     LogTarget(Trace, continuations) lt;
545     if (lt.develop_is_enabled()) {

599   assert(stack_size() >= 0, "");
600   assert(argsize() >= 0, "");
601   assert(!has_bitmap() || is_gc_mode(), "");
602 
603   if (is_empty()) {
604     assert(argsize() == 0, "");
605     assert(max_thawing_size() == 0, "");
606   }
607 
608   assert(oopDesc::is_oop_or_null(parent()), "");
609 
610   const bool concurrent = !Thread::current()->is_Java_thread();
611 
612   // If argsize == 0 and the chunk isn't mixed, the chunk contains the metadata (pc, fp -- frame::sender_sp_offset)
613   // for the top frame (below sp), and *not* for the bottom frame.
614   int size = stack_size() - argsize() - sp();
615   assert(size >= 0, "");
616   assert((size == 0) == is_empty(), "");
617 
618   const StackChunkFrameStream<ChunkFrames::Mixed> first(this);
619 
620   VerifyStackChunkFrameClosure closure(this);
621   iterate_stack(&closure);
622 
623   assert(!is_empty() || closure._cb == nullptr, "");
624   if (closure._cb != nullptr && closure._cb->is_compiled()) {
625     assert(argsize() ==
 626       (closure._cb->as_compiled_method()->method()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >> LogBytesPerWord,
 627       "chunk argsize: %d bottom frame argsize: %d", argsize(),
 628       (closure._cb->as_compiled_method()->method()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >> LogBytesPerWord);
629   }
630 
631   assert(closure._num_interpreted_frames == 0 || has_mixed_frames(), "");
632 
633   if (!concurrent) {
634     assert(closure._size <= size + argsize() + frame::metadata_words,
635            "size: %d argsize: %d closure.size: %d end sp: " PTR_FORMAT " start sp: %d chunk size: %d",
636            size, argsize(), closure._size, closure._sp - start_address(), sp(), stack_size());
637     assert(argsize() == closure._argsize - (closure._num_frames > 0 ? frame::metadata_words_at_top : 0),
638            "argsize(): %d closure.argsize: %d closure.callee_interpreted: %d",
639            argsize(), closure._argsize, closure._callee_interpreted);
640 