48 private:
49 const frame& _f;
50 const RegisterMapT* _map;
51
52 public:
53 FrameOopIterator(const frame& f, const RegisterMapT* map)
54 : _f(f),
55 _map(map) {
56 }
57
58 virtual void oops_do(OopClosure* cl) override {
59 if (_f.is_interpreted_frame()) {
60 _f.oops_interpreted_do(cl, nullptr);
61 } else {
62 OopMapDo<OopClosure, DerivedOopClosure, IncludeAllValues> visitor(cl, nullptr);
63 visitor.oops_do(&_f, _map, _f.oop_map());
64 }
65 }
66 };
67
68 frame stackChunkOopDesc::top_frame(RegisterMap* map) {
69 assert(!is_empty(), "");
70 StackChunkFrameStream<ChunkFrames::Mixed> fs(this);
71
72 map->set_stack_chunk(this);
73 fs.initialize_register_map(map);
74
75 frame f = fs.to_frame();
76
77 assert(to_offset(f.sp()) == sp(), "f.offset_sp(): %d sp(): %d async: %d", f.offset_sp(), sp(), map->is_async());
78 relativize_frame(f);
79 f.set_frame_index(0);
80 return f;
81 }
82
83 frame stackChunkOopDesc::sender(const frame& f, RegisterMap* map) {
84 assert(map->in_cont(), "");
85 assert(!map->include_argument_oops(), "");
86 assert(!f.is_empty(), "");
87 assert(map->stack_chunk() == this, "");
207 stackChunkOop _chunk;
208 DerivedPointerClosureType* _cl;
209
210 public:
211 EncodeGCModeConcurrentFrameClosure(stackChunkOop chunk, DerivedPointerClosureType* cl)
212 : _chunk(chunk),
213 _cl(cl) {
214 }
215
216 template <ChunkFrames frame_kind, typename RegisterMapT>
217 bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
218 f.iterate_derived_pointers(_cl, map);
219
220 BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
221 frame fr = f.to_frame();
222 FrameOopIterator<RegisterMapT> iterator(fr, map);
223 bs_chunk->encode_gc_mode(_chunk, &iterator);
224
225 return true;
226 }
227 };
228
229 bool stackChunkOopDesc::try_acquire_relativization() {
230 for (;;) {
231     // We use an acquiring load when reading the flags so that, if we leave this
232     // function believing relativization is finished because another thread performed
233     // it, we are also guaranteed to observe the relativized derived pointers. This
234     // matters because subsequent modifications of derived pointers must happen after
235     // relativization.
236 uint8_t flags_before = flags_acquire();
237 if ((flags_before & FLAG_GC_MODE) != 0) {
238 // Terminal state - relativization is ensured
239 return false;
240 }
241
242 if ((flags_before & FLAG_CLAIM_RELATIVIZE) != 0) {
243 // Someone else has claimed relativization - wait for completion
244 MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
245 uint8_t flags_under_lock = flags_acquire();
246 if ((flags_under_lock & FLAG_GC_MODE) != 0) {
281 ml.notify_all();
282 return;
283 }
284
285 if (try_set_flags(flags_before, flags_before | FLAG_GC_MODE)) {
286 // Successfully set the terminal state; we are done
287 return;
288 }
289 }
290 }
291
292 void stackChunkOopDesc::relativize_derived_pointers_concurrently() {
293 if (!try_acquire_relativization()) {
294 // Already relativized
295 return;
296 }
297
298 DerivedPointersSupport::RelativizeClosure derived_cl;
299 EncodeGCModeConcurrentFrameClosure<decltype(derived_cl)> frame_cl(this, &derived_cl);
300 iterate_stack(&frame_cl);
301
302 release_relativization();
303 }
304
305 class TransformStackChunkClosure {
306 stackChunkOop _chunk;
307
308 public:
309 TransformStackChunkClosure(stackChunkOop chunk) : _chunk(chunk) { }
310
311 template <ChunkFrames frame_kind, typename RegisterMapT>
312 bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
313 DerivedPointersSupport::RelativizeClosure derived_cl;
314 f.iterate_derived_pointers(&derived_cl, map);
315
316 BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
317 frame fr = f.to_frame();
318 FrameOopIterator<RegisterMapT> iterator(fr, map);
319 bs_chunk->encode_gc_mode(_chunk, &iterator);
320
321 return true;
322 }
323 };
324
325 void stackChunkOopDesc::transform() {
326 assert(!is_gc_mode(), "Should only be called once per chunk");
327 set_gc_mode(true);
328
329 assert(!has_bitmap(), "Should only be set once");
330 set_has_bitmap(true);
331 bitmap().clear();
332
333 TransformStackChunkClosure closure(this);
334 iterate_stack(&closure);
335 }
336
337 template <stackChunkOopDesc::BarrierType barrier, bool compressedOopsWithBitmap>
338 class BarrierClosure: public OopClosure {
339 NOT_PRODUCT(intptr_t* _sp;)
340
341 public:
342 BarrierClosure(intptr_t* sp) NOT_PRODUCT(: _sp(sp)) {}
343
344 virtual void do_oop(oop* p) override { compressedOopsWithBitmap ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
345 virtual void do_oop(narrowOop* p) override { do_oop_work(p); }
346
347 template <class T> inline void do_oop_work(T* p) {
348 oop value = (oop)HeapAccess<>::oop_load(p);
349 if (barrier == stackChunkOopDesc::BarrierType::Store) {
350 HeapAccess<>::oop_store(p, value);
351 }
352 }
353 };
354
391 template <typename RegisterMapT>
392 void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMapT* map) {
393 if (!(is_gc_mode() || requires_barriers())) {
394 return;
395 }
396
397 BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
398 FrameOopIterator<RegisterMapT> iterator(f, map);
399 bs_chunk->decode_gc_mode(this, &iterator);
400
401 if (f.is_compiled_frame() && f.oop_map()->has_derived_oops()) {
402 DerivedPointersSupport::DerelativizeClosure derived_closure;
403 OopMapDo<OopClosure, DerivedPointersSupport::DerelativizeClosure, SkipNullValue> visitor(nullptr, &derived_closure);
404 visitor.oops_do(&f, map, f.oop_map());
405 }
406 }
407
408 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMap* map);
409 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMap* map);
410
411 void stackChunkOopDesc::print_on(bool verbose, outputStream* st) const {
412 if (*((juint*)this) == badHeapWordVal) {
413 st->print_cr("BAD WORD");
414 } else {
415 InstanceStackChunkKlass::print_chunk(const_cast<stackChunkOopDesc*>(this), verbose, st);
416 }
417 }
418
419 #ifdef ASSERT
420
421 class StackChunkVerifyOopsClosure : public OopClosure {
422 stackChunkOop _chunk;
423 int _count;
424
425 public:
426 StackChunkVerifyOopsClosure(stackChunkOop chunk)
427 : _chunk(chunk), _count(0) {}
428
429 void do_oop(oop* p) override { (_chunk->has_bitmap() && UseCompressedOops) ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
430 void do_oop(narrowOop* p) override { do_oop_work(p); }
439 }
440 }
441
442 int count() const { return _count; }
443 };
444
445 class VerifyStackChunkFrameClosure {
446 stackChunkOop _chunk;
447
448 public:
449 intptr_t* _sp;
450 CodeBlob* _cb;
451 bool _callee_interpreted;
452 int _size;
453 int _argsize;
454 int _num_oops;
455 int _num_frames;
456 int _num_interpreted_frames;
457 int _num_i2c;
458
459 VerifyStackChunkFrameClosure(stackChunkOop chunk, int num_frames, int size)
460 : _chunk(chunk), _sp(nullptr), _cb(nullptr), _callee_interpreted(false),
461 _size(size), _argsize(0), _num_oops(0), _num_frames(num_frames), _num_interpreted_frames(0), _num_i2c(0) {}
462
463 template <ChunkFrames frame_kind, typename RegisterMapT>
464 bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
465 _sp = f.sp();
466 _cb = f.cb();
467
468 int fsize = f.frame_size() - ((f.is_interpreted() == _callee_interpreted) ? _argsize : 0);
469 int num_oops = f.num_oops();
470 assert(num_oops >= 0, "");
471
472 _argsize = f.stack_argsize() + frame::metadata_words_at_top;
473 _size += fsize;
474 _num_oops += num_oops;
475 if (f.is_interpreted()) {
476 _num_interpreted_frames++;
477 }
478
479 log_develop_trace(continuations)("debug_verify_stack_chunk frame: %d sp: " INTPTR_FORMAT " pc: " PTR_FORMAT " interpreted: %d size: %d argsize: %d oops: %d", _num_frames, f.sp() - _chunk->start_address(), p2i(f.pc()), f.is_interpreted(), fsize, _argsize, num_oops);
480 LogTarget(Trace, continuations) lt;
481 if (lt.develop_is_enabled()) {
535 assert(stack_size() >= 0, "");
536 assert(!has_bitmap() || is_gc_mode(), "");
537
538 if (is_empty()) {
539 assert(max_thawing_size() == 0, "");
540 } else {
541 assert(argsize() >= 0, "");
542 }
543
544 assert(oopDesc::is_oop_or_null(parent()), "");
545
546 const bool concurrent = !Thread::current()->is_Java_thread();
547
548 // If argsize == 0 and the chunk isn't mixed, the chunk contains the metadata (pc, fp -- frame::sender_sp_offset)
549 // for the top frame (below sp), and *not* for the bottom frame.
550 int size = bottom() - sp();
551 assert(size >= 0, "");
552 assert((size == 0) == is_empty(), "");
553
554 const StackChunkFrameStream<ChunkFrames::Mixed> first(this);
555 const bool has_safepoint_stub_frame = first.is_stub();
556
557 VerifyStackChunkFrameClosure closure(this,
558                                        has_safepoint_stub_frame ? 1 : 0, // iterate_stack() skips the safepoint stub
559 has_safepoint_stub_frame ? first.frame_size() : 0);
560 iterate_stack(&closure);
561
562 assert(!is_empty() || closure._cb == nullptr, "");
563 if (closure._cb != nullptr && closure._cb->is_nmethod()) {
564 assert(argsize() ==
565            (closure._cb->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord,
566 "chunk argsize: %d bottom frame argsize: %d", argsize(),
567            (closure._cb->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord);
568 }
569
570 assert(closure._num_interpreted_frames == 0 || has_mixed_frames(), "");
571
572 if (!concurrent) {
573 assert(closure._size <= size + (stack_size() - bottom()),
574 "size: %d bottom: %d closure.size: %d end sp: " PTR_FORMAT " start sp: %d chunk size: %d",
575 size, bottom(), closure._size, closure._sp - start_address(), sp(), stack_size());
576 if (closure._num_frames > 0) {
577 assert(closure._argsize >= frame::metadata_words_at_top, "should be set up");
578 assert(argsize() == closure._argsize - frame::metadata_words_at_top,
579 "argsize(): %d closure.argsize: %d closure.callee_interpreted: %d",
|
48 private:
49 const frame& _f;
50 const RegisterMapT* _map;
51
52 public:
53 FrameOopIterator(const frame& f, const RegisterMapT* map)
54 : _f(f),
55 _map(map) {
56 }
57
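  // Interpreted frames are walked via the interpreter's oop iteration; compiled
  // frames via their OopMap. Derived pointers are not visited here.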
58 virtual void oops_do(OopClosure* cl) override {
59 if (_f.is_interpreted_frame()) {
60 _f.oops_interpreted_do(cl, nullptr);
61 } else {
62 OopMapDo<OopClosure, DerivedOopClosure, IncludeAllValues> visitor(cl, nullptr);
63 visitor.oops_do(&_f, _map, _f.oop_map());
64 }
65 }
66 };
67
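// Visits the oops of the chunk's lock stack, which is stored at the beginning of
// the chunk's stack area (start_address()); lockstack_size() gives the number of entries.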
68 class LockStackOopIterator : public OopIterator {
69 private:
70 const stackChunkOop _chunk;
71 public:
72 LockStackOopIterator(const stackChunkOop chunk) : _chunk(chunk) {}
73
74 virtual void oops_do(OopClosure* cl) override {
75 int cnt = _chunk->lockstack_size();
76 oop* lockstack_start = (oop*)_chunk->start_address();
77 for (int i = 0; i < cnt; i++) {
78 cl->do_oop(&lockstack_start[i]);
79 }
80 }
81 };
82
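// Returns the chunk's top frame and prepares the given RegisterMap for walking
// frames inside this chunk.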
83 frame stackChunkOopDesc::top_frame(RegisterMap* map) {
84 assert(!is_empty(), "");
85 StackChunkFrameStream<ChunkFrames::Mixed> fs(this);
86
87 map->set_stack_chunk(this);
88 fs.initialize_register_map(map);
89
90 frame f = fs.to_frame();
91
92 assert(to_offset(f.sp()) == sp(), "f.offset_sp(): %d sp(): %d async: %d", f.offset_sp(), sp(), map->is_async());
93 relativize_frame(f);
94 f.set_frame_index(0);
95 return f;
96 }
97
98 frame stackChunkOopDesc::sender(const frame& f, RegisterMap* map) {
99 assert(map->in_cont(), "");
100 assert(!map->include_argument_oops(), "");
101 assert(!f.is_empty(), "");
102 assert(map->stack_chunk() == this, "");
222 stackChunkOop _chunk;
223 DerivedPointerClosureType* _cl;
224
225 public:
226 EncodeGCModeConcurrentFrameClosure(stackChunkOop chunk, DerivedPointerClosureType* cl)
227 : _chunk(chunk),
228 _cl(cl) {
229 }
230
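  // Relativize the frame's derived pointers, then have the GC's barrier set
  // encode the frame's oops for GC mode.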
231 template <ChunkFrames frame_kind, typename RegisterMapT>
232 bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
233 f.iterate_derived_pointers(_cl, map);
234
235 BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
236 frame fr = f.to_frame();
237 FrameOopIterator<RegisterMapT> iterator(fr, map);
238 bs_chunk->encode_gc_mode(_chunk, &iterator);
239
240 return true;
241 }
242
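  // Also encode the oops held in the chunk's lock stack for GC mode.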
243 bool do_lockstack() {
244 BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
245 LockStackOopIterator iterator(_chunk);
246 bs_chunk->encode_gc_mode(_chunk, &iterator);
247
248 return true;
249 }
250 };
251
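// Returns true if the calling thread has claimed responsibility for relativizing
// this chunk's derived pointers. Returns false if relativization has already been
// completed, if necessary waiting for the thread that claimed it to finish.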
252 bool stackChunkOopDesc::try_acquire_relativization() {
253 for (;;) {
254     // We use an acquiring load when reading the flags so that, if we leave this
255     // function believing relativization is finished because another thread performed
256     // it, we are also guaranteed to observe the relativized derived pointers. This
257     // matters because subsequent modifications of derived pointers must happen after
258     // relativization.
259 uint8_t flags_before = flags_acquire();
260 if ((flags_before & FLAG_GC_MODE) != 0) {
261 // Terminal state - relativization is ensured
262 return false;
263 }
264
265 if ((flags_before & FLAG_CLAIM_RELATIVIZE) != 0) {
266 // Someone else has claimed relativization - wait for completion
267 MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
268 uint8_t flags_under_lock = flags_acquire();
269 if ((flags_under_lock & FLAG_GC_MODE) != 0) {
304 ml.notify_all();
305 return;
306 }
307
308 if (try_set_flags(flags_before, flags_before | FLAG_GC_MODE)) {
309 // Successfully set the terminal state; we are done
310 return;
311 }
312 }
313 }
314
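// Relativizes the chunk's derived pointers and encodes its oops (including the
// lock stack) for GC mode at most once, even if several threads attempt it concurrently.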
315 void stackChunkOopDesc::relativize_derived_pointers_concurrently() {
316 if (!try_acquire_relativization()) {
317 // Already relativized
318 return;
319 }
320
321 DerivedPointersSupport::RelativizeClosure derived_cl;
322 EncodeGCModeConcurrentFrameClosure<decltype(derived_cl)> frame_cl(this, &derived_cl);
323 iterate_stack(&frame_cl);
324 frame_cl.do_lockstack();
325
326 release_relativization();
327 }
328
329 class TransformStackChunkClosure {
330 stackChunkOop _chunk;
331
332 public:
333 TransformStackChunkClosure(stackChunkOop chunk) : _chunk(chunk) { }
334
335 template <ChunkFrames frame_kind, typename RegisterMapT>
336 bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
337 DerivedPointersSupport::RelativizeClosure derived_cl;
338 f.iterate_derived_pointers(&derived_cl, map);
339
340 BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
341 frame fr = f.to_frame();
342 FrameOopIterator<RegisterMapT> iterator(fr, map);
343 bs_chunk->encode_gc_mode(_chunk, &iterator);
344
345 return true;
346 }
347
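  // Also encode the oops held in the chunk's lock stack for GC mode.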
348 bool do_lockstack() {
349 BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
350 LockStackOopIterator iterator(_chunk);
351 bs_chunk->encode_gc_mode(_chunk, &iterator);
352
353 return true;
354 }
355 };
356
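// Switches the chunk to GC mode: sets the flag, initializes the oop bitmap, and then
// relativizes derived pointers and encodes the oops of every frame and of the lock stack.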
357 void stackChunkOopDesc::transform() {
358 assert(!is_gc_mode(), "Should only be called once per chunk");
359 set_gc_mode(true);
360
361 assert(!has_bitmap(), "Should only be set once");
362 set_has_bitmap(true);
363 bitmap().clear();
364
365 TransformStackChunkClosure closure(this);
366 iterate_stack(&closure);
367 closure.do_lockstack();
368 }
369
370 template <stackChunkOopDesc::BarrierType barrier, bool compressedOopsWithBitmap>
371 class BarrierClosure: public OopClosure {
372 NOT_PRODUCT(intptr_t* _sp;)
373
374 public:
375 BarrierClosure(intptr_t* sp) NOT_PRODUCT(: _sp(sp)) {}
376
377 virtual void do_oop(oop* p) override { compressedOopsWithBitmap ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
378 virtual void do_oop(narrowOop* p) override { do_oop_work(p); }
379
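  // Load through the heap access API so the GC's load barrier is applied; for a
  // Store barrier type, also write the value back so the store barrier is applied.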
380 template <class T> inline void do_oop_work(T* p) {
381 oop value = (oop)HeapAccess<>::oop_load(p);
382 if (barrier == stackChunkOopDesc::BarrierType::Store) {
383 HeapAccess<>::oop_store(p, value);
384 }
385 }
386 };
387
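// After a frame has been thawed onto the stack, decode its oops back from GC mode
// and, for compiled frames with derived oops, turn relativized derived pointers back
// into absolute ones. A no-op unless the chunk is in GC mode or requires barriers.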
424 template <typename RegisterMapT>
425 void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMapT* map) {
426 if (!(is_gc_mode() || requires_barriers())) {
427 return;
428 }
429
430 BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
431 FrameOopIterator<RegisterMapT> iterator(f, map);
432 bs_chunk->decode_gc_mode(this, &iterator);
433
434 if (f.is_compiled_frame() && f.oop_map()->has_derived_oops()) {
435 DerivedPointersSupport::DerelativizeClosure derived_closure;
436 OopMapDo<OopClosure, DerivedPointersSupport::DerelativizeClosure, SkipNullValue> visitor(nullptr, &derived_closure);
437 visitor.oops_do(&f, map, f.oop_map());
438 }
439 }
440
441 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMap* map);
442 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMap* map);
443
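// Copies the lock-stack oops stored at the start of the chunk into dst. When the chunk
// is in GC mode or requires barriers, the oops are loaded through the heap access
// barrier, honoring the compressed-oop encoding if the chunk has a bitmap.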
444 void stackChunkOopDesc::copy_lockstack(oop* dst) {
445 int cnt = lockstack_size();
446
447 if (!(is_gc_mode() || requires_barriers())) {
448 oop* lockstack_start = (oop*)start_address();
449 for (int i = 0; i < cnt; i++) {
450 dst[i] = lockstack_start[i];
451 assert(oopDesc::is_oop(dst[i]), "not an oop");
452 }
453 return;
454 }
455
456 if (has_bitmap() && UseCompressedOops) {
457 intptr_t* lockstack_start = start_address();
458 for (int i = 0; i < cnt; i++) {
459 oop mon_owner = HeapAccess<>::oop_load((narrowOop*)&lockstack_start[i]);
460 assert(oopDesc::is_oop(mon_owner), "not an oop");
461 dst[i] = mon_owner;
462 }
463 } else {
464 intptr_t* lockstack_start = start_address();
465 for (int i = 0; i < cnt; i++) {
466 oop mon_owner = HeapAccess<>::oop_load((oop*)&lockstack_start[i]);
467 assert(oopDesc::is_oop(mon_owner), "not an oop");
468 dst[i] = mon_owner;
469 }
470 }
471 }
472
473 void stackChunkOopDesc::print_on(bool verbose, outputStream* st) const {
474 if (*((juint*)this) == badHeapWordVal) {
475 st->print_cr("BAD WORD");
476 } else {
477 InstanceStackChunkKlass::print_chunk(const_cast<stackChunkOopDesc*>(this), verbose, st);
478 }
479 }
480
481 #ifdef ASSERT
482
483 class StackChunkVerifyOopsClosure : public OopClosure {
484 stackChunkOop _chunk;
485 int _count;
486
487 public:
488 StackChunkVerifyOopsClosure(stackChunkOop chunk)
489 : _chunk(chunk), _count(0) {}
490
491 void do_oop(oop* p) override { (_chunk->has_bitmap() && UseCompressedOops) ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
492 void do_oop(narrowOop* p) override { do_oop_work(p); }
501 }
502 }
503
504 int count() const { return _count; }
505 };
506
507 class VerifyStackChunkFrameClosure {
508 stackChunkOop _chunk;
509
510 public:
511 intptr_t* _sp;
512 CodeBlob* _cb;
513 bool _callee_interpreted;
514 int _size;
515 int _argsize;
516 int _num_oops;
517 int _num_frames;
518 int _num_interpreted_frames;
519 int _num_i2c;
520
521 VerifyStackChunkFrameClosure(stackChunkOop chunk)
522 : _chunk(chunk), _sp(nullptr), _cb(nullptr), _callee_interpreted(false),
523 _size(0), _argsize(0), _num_oops(0), _num_frames(0), _num_interpreted_frames(0), _num_i2c(0) {}
524
525 template <ChunkFrames frame_kind, typename RegisterMapT>
526 bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
527 _sp = f.sp();
528 _cb = f.cb();
529
530 int fsize = f.frame_size() - ((f.is_interpreted() == _callee_interpreted) ? _argsize : 0);
531 int num_oops = f.num_oops();
532 assert(num_oops >= 0, "");
533
534 _argsize = f.stack_argsize() + frame::metadata_words_at_top;
535 _size += fsize;
536 _num_oops += num_oops;
537 if (f.is_interpreted()) {
538 _num_interpreted_frames++;
539 }
540
541 log_develop_trace(continuations)("debug_verify_stack_chunk frame: %d sp: " INTPTR_FORMAT " pc: " PTR_FORMAT " interpreted: %d size: %d argsize: %d oops: %d", _num_frames, f.sp() - _chunk->start_address(), p2i(f.pc()), f.is_interpreted(), fsize, _argsize, num_oops);
542 LogTarget(Trace, continuations) lt;
543 if (lt.develop_is_enabled()) {
597 assert(stack_size() >= 0, "");
598 assert(!has_bitmap() || is_gc_mode(), "");
599
600 if (is_empty()) {
601 assert(max_thawing_size() == 0, "");
602 } else {
603 assert(argsize() >= 0, "");
604 }
605
606 assert(oopDesc::is_oop_or_null(parent()), "");
607
608 const bool concurrent = !Thread::current()->is_Java_thread();
609
610 // If argsize == 0 and the chunk isn't mixed, the chunk contains the metadata (pc, fp -- frame::sender_sp_offset)
611 // for the top frame (below sp), and *not* for the bottom frame.
612 int size = bottom() - sp();
613 assert(size >= 0, "");
614 assert((size == 0) == is_empty(), "");
615
616 const StackChunkFrameStream<ChunkFrames::Mixed> first(this);
617
618 VerifyStackChunkFrameClosure closure(this);
619 iterate_stack(&closure);
620
621 assert(!is_empty() || closure._cb == nullptr, "");
622 if (closure._cb != nullptr && closure._cb->is_nmethod()) {
623 assert(argsize() ==
624            (closure._cb->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord,
625 "chunk argsize: %d bottom frame argsize: %d", argsize(),
626            (closure._cb->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord);
627 }
628
629 assert(closure._num_interpreted_frames == 0 || has_mixed_frames(), "");
630
631 if (!concurrent) {
632 assert(closure._size <= size + (stack_size() - bottom()),
633 "size: %d bottom: %d closure.size: %d end sp: " PTR_FORMAT " start sp: %d chunk size: %d",
634 size, bottom(), closure._size, closure._sp - start_address(), sp(), stack_size());
635 if (closure._num_frames > 0) {
636 assert(closure._argsize >= frame::metadata_words_at_top, "should be set up");
637 assert(argsize() == closure._argsize - frame::metadata_words_at_top,
638 "argsize(): %d closure.argsize: %d closure.callee_interpreted: %d",
|