/*
 * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/compiledMethod.hpp"
#include "code/scopeDesc.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetStackChunk.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/memRegion.hpp"
#include "oops/instanceStackChunkKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "runtime/frame.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "runtime/stackChunkFrameStream.inline.hpp"

// Note: Some functions in this file work with stale object pointers, e.g.
//       DerivedPointersSupport. Be extra careful not to put those pointers into
//       variables of the 'oop' type. There's extra GC verification around oops
//       that may fail when stale oops are being used.

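// Adapts a frame to the OopIterator interface: visits the frame's oops either
// via the interpreter's oop map (for interpreted frames) or via the compiled
// code's oop map (for compiled frames).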
template <typename RegisterMapT>
class FrameOopIterator : public OopIterator {
private:
  const frame& _f;
  const RegisterMapT* _map;

public:
  FrameOopIterator(const frame& f, const RegisterMapT* map)
    : _f(f),
      _map(map) {
  }

  virtual void oops_do(OopClosure* cl) override {
    if (_f.is_interpreted_frame()) {
      _f.oops_interpreted_do(cl, nullptr);
    } else {
      OopMapDo<OopClosure, DerivedOopClosure, IncludeAllValues> visitor(cl, nullptr);
      visitor.oops_do(&_f, _map, _f.oop_map());
    }
  }
};

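// Visits the oops held in the chunk's lock stack, which is stored at the
// start of the chunk's stack area.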
class LockStackOopIterator : public OopIterator {
private:
  const stackChunkOop _chunk;
public:
  LockStackOopIterator(const stackChunkOop chunk) : _chunk(chunk) {}

  virtual void oops_do(OopClosure* cl) override {
    int cnt = _chunk->lockStackSize();
    oop* lockstack_start = (oop*)_chunk->start_address();
    for (int i = 0; i < cnt; i++) {
      cl->do_oop(&lockstack_start[i]);
    }
  }
};

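// Returns the topmost frame of the chunk and initializes the register map
// for walking the remaining frames.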
frame stackChunkOopDesc::top_frame(RegisterMap* map) {
  assert(!is_empty(), "");
  StackChunkFrameStream<ChunkFrames::Mixed> fs(this);

  map->set_stack_chunk(this);
  fs.initialize_register_map(map);

  frame f = fs.to_frame();

  assert(to_offset(f.sp()) == sp(), "f.offset_sp(): %d sp(): %d async: %d", f.offset_sp(), sp(), map->is_async());
  relativize_frame(f);
  f.set_frame_index(0);
  return f;
}

frame stackChunkOopDesc::sender(const frame& f, RegisterMap* map) {
  assert(map->in_cont(), "");
  assert(!map->include_argument_oops(), "");
  assert(!f.is_empty(), "");
  assert(map->stack_chunk() == this, "");
  assert(!is_empty(), "");

  int index = f.frame_index(); // we need to capture the index before calling derelativize, which destroys it
  StackChunkFrameStream<ChunkFrames::Mixed> fs(this, derelativize(f));
  fs.next(map);

  if (!fs.is_done()) {
    frame sender = fs.to_frame();
    assert(is_usable_in_chunk(sender.unextended_sp()), "");
    relativize_frame(sender);

    sender.set_frame_index(index + 1);
    return sender;
  }

  if (parent() != nullptr) {
    assert(!parent()->is_empty(), "");
    return parent()->top_frame(map);
  }

  return Continuation::continuation_parent_frame(map);
}

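// Counts the Java frames represented by a single physical frame: a compiled
// frame may cover several inlined Java frames, one per scope at the given pc.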
static int num_java_frames(CompiledMethod* cm, address pc) {
  int count = 0;
  for (ScopeDesc* scope = cm->scope_desc_at(pc); scope != nullptr; scope = scope->sender()) {
    count++;
  }
  return count;
}

static int num_java_frames(const StackChunkFrameStream<ChunkFrames::Mixed>& f) {
  assert(f.is_interpreted()
         || (f.cb() != nullptr && f.cb()->is_compiled() && f.cb()->as_compiled_method()->is_java_method()), "");
  return f.is_interpreted() ? 1 : num_java_frames(f.cb()->as_compiled_method(), f.orig_pc());
}

int stackChunkOopDesc::num_java_frames() const {
  int n = 0;
  for (StackChunkFrameStream<ChunkFrames::Mixed> f(const_cast<stackChunkOopDesc*>(this)); !f.is_done();
       f.next(SmallRegisterMap::instance)) {
    if (!f.is_stub()) {
      n += ::num_java_frames(f);
    }
  }
  return n;
}

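// Applies the load or store barriers of do_barriers0() (below) to every frame
// in the chunk.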
template <stackChunkOopDesc::BarrierType barrier>
class DoBarriersStackClosure {
  const stackChunkOop _chunk;

public:
  DoBarriersStackClosure(stackChunkOop chunk) : _chunk(chunk) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    _chunk->do_barriers0<barrier>(f, map);
    return true;
  }
};

template <stackChunkOopDesc::BarrierType barrier>
void stackChunkOopDesc::do_barriers() {
  DoBarriersStackClosure<barrier> closure(this);
  iterate_stack(&closure);
}

template void stackChunkOopDesc::do_barriers<stackChunkOopDesc::BarrierType::Load> ();
template void stackChunkOopDesc::do_barriers<stackChunkOopDesc::BarrierType::Store>();

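// A derived pointer points into the middle of an object, at a fixed offset
// from a base oop. While the chunk is in GC mode the base oop may be moved or
// stale, so derived pointers are stored as offsets from their base
// (relativized) and turned back into real pointers (derelativized) when the
// frame is thawed.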
class DerivedPointersSupport {
public:
  static void relativize(derived_base* base_loc, derived_pointer* derived_loc) {
    // The base oop could be stale from the GC's point-of-view. Treat it as an
    // uintptr_t to stay clear of the oop verification code in oopsHierarchy.hpp.
    uintptr_t base = *(uintptr_t*)base_loc;
    if (base == 0) {
      return;
    }
    assert(!UseCompressedOops || !CompressedOops::is_base((void*)base), "");

    // This is always a full derived pointer
    uintptr_t derived_int_val = *(uintptr_t*)derived_loc;

    // Make the pointer an offset (relativize) and store it at the same location
    uintptr_t offset = derived_int_val - base;
    *(uintptr_t*)derived_loc = offset;
  }

  static void derelativize(derived_base* base_loc, derived_pointer* derived_loc) {
    uintptr_t base = *(uintptr_t*)base_loc;
    if (base == 0) {
      return;
    }
    assert(!UseCompressedOops || !CompressedOops::is_base((void*)base), "");

    // All derived pointers should have been relativized into offsets
    uintptr_t offset = *(uintptr_t*)derived_loc;

    // Restore the original derived pointer
    *(uintptr_t*)derived_loc = base + offset;
  }

  struct RelativizeClosure : public DerivedOopClosure {
    virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override {
      DerivedPointersSupport::relativize(base_loc, derived_loc);
    }
  };

  struct DerelativizeClosure : public DerivedOopClosure {
    virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override {
      DerivedPointersSupport::derelativize(base_loc, derived_loc);
    }
  };
};

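// Used by relativize_derived_pointers_concurrently() below: relativizes each
// frame's derived pointers, then lets the GC's BarrierSetStackChunk encode the
// frame's oops for GC mode.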
template <typename DerivedPointerClosureType>
class EncodeGCModeConcurrentFrameClosure {
  stackChunkOop _chunk;
  DerivedPointerClosureType* _cl;

public:
  EncodeGCModeConcurrentFrameClosure(stackChunkOop chunk, DerivedPointerClosureType* cl)
    : _chunk(chunk),
      _cl(cl) {
  }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    f.iterate_derived_pointers(_cl, map);

    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    frame fr = f.to_frame();
    FrameOopIterator<RegisterMapT> iterator(fr, map);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }

  bool do_lockstack() {
    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    LockStackOopIterator iterator(_chunk);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }
};

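// Relativization is claimed by at most one thread, coordinated through three
// monotonic flags: FLAG_CLAIM_RELATIVIZE marks the claimer,
// FLAG_NOTIFY_RELATIVIZE asks the claimer to notify waiters, and FLAG_GC_MODE
// is the terminal state meaning relativization is complete. Returns true if
// the caller claimed the work, false if it has already been done.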
bool stackChunkOopDesc::try_acquire_relativization() {
  for (;;) {
    // We use an acquiring load when reading the flags to ensure that if we leave this
    // function thinking that relativization is finished, we know that if another thread
    // did the relativization, we will still be able to observe the relativized derived
    // pointers, which is important as subsequent modifications of derived pointers must
    // happen after relativization.
    uint8_t flags_before = flags_acquire();
    if ((flags_before & FLAG_GC_MODE) != 0) {
      // Terminal state - relativization is ensured
      return false;
    }

    if ((flags_before & FLAG_CLAIM_RELATIVIZE) != 0) {
      // Someone else has claimed relativization - wait for completion
      MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
      uint8_t flags_under_lock = flags_acquire();
      if ((flags_under_lock & FLAG_GC_MODE) != 0) {
        // Terminal state - relativization is ensured
        return false;
      }

      if ((flags_under_lock & FLAG_NOTIFY_RELATIVIZE) != 0) {
        // The claiming thread already knows it needs to notify waiters
        ml.wait();
      } else if (try_set_flags(flags_under_lock, flags_under_lock | FLAG_NOTIFY_RELATIVIZE)) {
        // We have now told the claiming thread that it needs to notify waiters
        ml.wait();
      }
      // Retry - rerun the loop
      continue;
    }

    if (try_set_flags(flags_before, flags_before | FLAG_CLAIM_RELATIVIZE)) {
      // Claimed relativization - let's do it
      return true;
    }
  }
}

void stackChunkOopDesc::release_relativization() {
  for (;;) {
    uint8_t flags_before = flags();
    if ((flags_before & FLAG_NOTIFY_RELATIVIZE) != 0) {
      MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
      // No need to CAS the terminal state; nobody else can be mutating the flags
      // concurrently here, as both the claim and notify flags are already set
      // (and monotonic). We do however need to use a releasing store on the
      // flags, to ensure that a reader of that value (using load_acquire) will
      // be able to observe the relativization of the derived pointers.
      uint8_t flags_under_lock = flags();
      release_set_flags(flags_under_lock | FLAG_GC_MODE);
      ml.notify_all();
      return;
    }

    if (try_set_flags(flags_before, flags_before | FLAG_GC_MODE)) {
      // Successfully set the terminal state; we are done
      return;
    }
  }
}

void stackChunkOopDesc::relativize_derived_pointers_concurrently() {
  if (!try_acquire_relativization()) {
    // Already relativized
    return;
  }

  DerivedPointersSupport::RelativizeClosure derived_cl;
  EncodeGCModeConcurrentFrameClosure<decltype(derived_cl)> frame_cl(this, &derived_cl);
  iterate_stack(&frame_cl);
  frame_cl.do_lockstack();

  release_relativization();
}

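// Like EncodeGCModeConcurrentFrameClosure above, but driven by transform()
// below rather than by the concurrent relativization protocol.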
class TransformStackChunkClosure {
  stackChunkOop _chunk;

public:
  TransformStackChunkClosure(stackChunkOop chunk) : _chunk(chunk) { }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    DerivedPointersSupport::RelativizeClosure derived_cl;
    f.iterate_derived_pointers(&derived_cl, map);

    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    frame fr = f.to_frame();
    FrameOopIterator<RegisterMapT> iterator(fr, map);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }

  bool do_lockstack() {
    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    LockStackOopIterator iterator(_chunk);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }
};

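// Switches the chunk into GC mode: installs and clears the oop bitmap, then
// rewrites every frame (and the lock stack) into the GC-mode representation.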
void stackChunkOopDesc::transform() {
  assert(!is_gc_mode(), "Should only be called once per chunk");
  set_gc_mode(true);

  assert(!has_bitmap(), "Should only be set once");
  set_has_bitmap(true);
  bitmap().clear();

  TransformStackChunkClosure closure(this);
  iterate_stack(&closure);
  closure.do_lockstack();
}

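// Loads each oop through HeapAccess, applying the GC's load barrier; for the
// Store barrier type, the value is stored back to also trigger store barriers.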
template <stackChunkOopDesc::BarrierType barrier, bool compressedOopsWithBitmap>
class BarrierClosure: public OopClosure {
  NOT_PRODUCT(intptr_t* _sp;)

public:
  BarrierClosure(intptr_t* sp) NOT_PRODUCT(: _sp(sp)) {}

  virtual void do_oop(oop* p)       override { compressedOopsWithBitmap ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
  virtual void do_oop(narrowOop* p) override { do_oop_work(p); }

  template <class T> inline void do_oop_work(T* p) {
    oop value = (oop)HeapAccess<>::oop_load(p);
    if (barrier == stackChunkOopDesc::BarrierType::Store) {
      HeapAccess<>::oop_store(p, value);
    }
  }
};

template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
void stackChunkOopDesc::do_barriers0(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
  // We need to invoke the write barriers so as not to miss oops in old chunks that haven't yet been concurrently scanned
  assert(!f.is_done(), "");

  if (f.is_interpreted()) {
    Method* m = f.to_frame().interpreter_frame_method();
    // Class redefinition support
    m->record_gc_epoch();
  } else if (f.is_compiled()) {
    nmethod* nm = f.cb()->as_nmethod();
    // The entry barrier takes care of having the right synchronization
    // when keeping the nmethod alive during concurrent execution.
    nm->run_nmethod_entry_barrier();
    // There is no need to mark the Method, as class redefinition will walk the
    // CodeCache, noting their Methods
  }

  if (has_bitmap() && UseCompressedOops) {
    BarrierClosure<barrier, true> oops_closure(f.sp());
    f.iterate_oops(&oops_closure, map);
  } else {
    BarrierClosure<barrier, false> oops_closure(f.sp());
    f.iterate_oops(&oops_closure, map);
  }
}

template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMap* map);

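// After a frame has been thawed (copied back to the thread's stack), decode
// its oops out of GC mode and turn relativized derived pointers back into
// real pointers.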
template <typename RegisterMapT>
void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMapT* map) {
  if (!(is_gc_mode() || requires_barriers())) {
    return;
  }

  BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
  FrameOopIterator<RegisterMapT> iterator(f, map);
  bs_chunk->decode_gc_mode(this, &iterator);

  if (f.is_compiled_frame() && f.oop_map()->has_derived_oops()) {
    DerivedPointersSupport::DerelativizeClosure derived_closure;
    OopMapDo<OopClosure, DerivedPointersSupport::DerelativizeClosure, SkipNullValue> visitor(nullptr, &derived_closure);
    visitor.oops_do(&f, map, f.oop_map());
  }
}

template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMap* map);
template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMap* map);

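// Copies the oops of the chunk's lock stack into dst. When the chunk is in GC
// mode or requires barriers, the oops are loaded through HeapAccess, honoring
// their possibly compressed in-chunk representation.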
void stackChunkOopDesc::copy_lockstack(oop* dst) {
  int cnt = lockStackSize();

  if (!(is_gc_mode() || requires_barriers())) {
    oop* lockstack_start = (oop*)start_address();
    for (int i = 0; i < cnt; i++) {
      dst[i] = lockstack_start[i];
      assert(oopDesc::is_oop(dst[i]), "not an oop");
    }
    return;
  }

  if (has_bitmap() && UseCompressedOops) {
    intptr_t* lockstack_start = start_address();
    for (int i = 0; i < cnt; i++) {
      oop mon_owner = HeapAccess<>::oop_load((narrowOop*)&lockstack_start[i]);
      assert(oopDesc::is_oop(mon_owner), "not an oop");
      dst[i] = mon_owner;
    }
  } else {
    intptr_t* lockstack_start = start_address();
    for (int i = 0; i < cnt; i++) {
      oop mon_owner = HeapAccess<>::oop_load((oop*)&lockstack_start[i]);
      assert(oopDesc::is_oop(mon_owner), "not an oop");
      dst[i] = mon_owner;
    }
  }
}

void stackChunkOopDesc::print_on(bool verbose, outputStream* st) const {
  if (*((juint*)this) == badHeapWordVal) {
    st->print_cr("BAD WORD");
  } else if (*((juint*)this) == badMetaWordVal) {
    st->print_cr("BAD META WORD");
  } else {
    InstanceStackChunkKlass::print_chunk(const_cast<stackChunkOopDesc*>(this), verbose, st);
  }
}

#ifdef ASSERT

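// Debug-only helpers used by verify() below to check the chunk's frames, oops
// and oop bitmap for consistency.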
class StackChunkVerifyOopsClosure : public OopClosure {
  stackChunkOop _chunk;
  int _count;

public:
  StackChunkVerifyOopsClosure(stackChunkOop chunk)
    : _chunk(chunk), _count(0) {}

  void do_oop(oop* p) override { (_chunk->has_bitmap() && UseCompressedOops) ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
  void do_oop(narrowOop* p) override { do_oop_work(p); }

  template <typename T> inline void do_oop_work(T* p) {
    _count++;
    oop obj = _chunk->load_oop(p);
    assert(obj == nullptr || dbg_is_good_oop(obj), "p: " PTR_FORMAT " obj: " PTR_FORMAT, p2i(p), p2i(obj));
    if (_chunk->has_bitmap()) {
      BitMap::idx_t index = _chunk->bit_index_for(p);
      assert(_chunk->bitmap().at(index), "Bit not set at index " SIZE_FORMAT " corresponding to " PTR_FORMAT, index, p2i(p));
    }
  }

  int count() const { return _count; }
};

class VerifyStackChunkFrameClosure {
  stackChunkOop _chunk;

public:
  intptr_t* _sp;
  CodeBlob* _cb;
  bool _callee_interpreted;
  int _size;
  int _argsize;
  int _num_oops;
  int _num_frames;
  int _num_interpreted_frames;
  int _num_i2c;

  VerifyStackChunkFrameClosure(stackChunkOop chunk)
    : _chunk(chunk), _sp(nullptr), _cb(nullptr), _callee_interpreted(false),
      _size(0), _argsize(0), _num_oops(0), _num_frames(0), _num_interpreted_frames(0), _num_i2c(0) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    _sp = f.sp();
    _cb = f.cb();

    int fsize = f.frame_size() - ((f.is_interpreted() == _callee_interpreted) ? _argsize : 0);
    int num_oops = f.num_oops();
    assert(num_oops >= 0, "");

    _argsize   = f.stack_argsize() + frame::metadata_words_at_top;
    _size     += fsize;
    _num_oops += num_oops;
    if (f.is_interpreted()) {
      _num_interpreted_frames++;
    }

    log_develop_trace(continuations)("debug_verify_stack_chunk frame: %d sp: " INTPTR_FORMAT " pc: " PTR_FORMAT " interpreted: %d size: %d argsize: %d oops: %d", _num_frames, f.sp() - _chunk->start_address(), p2i(f.pc()), f.is_interpreted(), fsize, _argsize, num_oops);
    LogTarget(Trace, continuations) lt;
    if (lt.develop_is_enabled()) {
      LogStream ls(lt);
      f.print_on(&ls);
    }
    assert(f.pc() != nullptr,
           "young: %d num_frames: %d sp: " PTR_FORMAT " start: " PTR_FORMAT " end: " PTR_FORMAT,
           !_chunk->requires_barriers(), _num_frames, p2i(f.sp()), p2i(_chunk->start_address()), p2i(_chunk->bottom_address()));

    if (_num_frames == 0) {
      assert(f.pc() == _chunk->pc(), "");
    }

    if (_num_frames > 0 && !_callee_interpreted && f.is_interpreted()) {
      log_develop_trace(continuations)("debug_verify_stack_chunk i2c");
      _num_i2c++;
    }

    StackChunkVerifyOopsClosure oops_closure(_chunk);
    f.iterate_oops(&oops_closure, map);
    assert(oops_closure.count() == num_oops, "oops: %d oopmap->num_oops(): %d", oops_closure.count(), num_oops);

    _callee_interpreted = f.is_interpreted();
    _num_frames++;
    return true;
  }
};

template <typename T>
class StackChunkVerifyBitmapClosure : public BitMapClosure {
  stackChunkOop _chunk;

public:
  int _count;

  StackChunkVerifyBitmapClosure(stackChunkOop chunk) : _chunk(chunk), _count(0) {}

  bool do_bit(BitMap::idx_t index) override {
    T* p = _chunk->address_for_bit<T>(index);
    _count++;

    oop obj = _chunk->load_oop(p);
    assert(obj == nullptr || dbg_is_good_oop(obj),
           "p: " PTR_FORMAT " obj: " PTR_FORMAT " index: " SIZE_FORMAT,
           p2i(p), p2i((oopDesc*)obj), index);

    return true; // continue processing
  }
};

bool stackChunkOopDesc::verify(size_t* out_size, int* out_oops, int* out_frames, int* out_interpreted_frames) {
  DEBUG_ONLY(if (!VerifyContinuations) return true;)

  assert(oopDesc::is_oop(this), "");

  assert(stack_size() >= 0, "");
  assert(argsize() >= 0, "");
  assert(!has_bitmap() || is_gc_mode(), "");

  if (is_empty()) {
    assert(argsize() == 0, "");
    assert(max_thawing_size() == 0, "");
  }

  assert(oopDesc::is_oop_or_null(parent()), "");

  const bool concurrent = !Thread::current()->is_Java_thread();

  // If argsize == 0 and the chunk isn't mixed, the chunk contains the metadata (pc, fp -- frame::sender_sp_offset)
  // for the top frame (below sp), and *not* for the bottom frame.
  int size = stack_size() - argsize() - sp();
  assert(size >= 0, "");
  assert((size == 0) == is_empty(), "");

  const StackChunkFrameStream<ChunkFrames::Mixed> first(this);

  VerifyStackChunkFrameClosure closure(this);
  iterate_stack(&closure);

  assert(!is_empty() || closure._cb == nullptr, "");
  if (closure._cb != nullptr && closure._cb->is_compiled()) {
    assert(argsize() ==
           (closure._cb->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord,
           "chunk argsize: %d bottom frame argsize: %d", argsize(),
           (closure._cb->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord);
  }

  assert(closure._num_interpreted_frames == 0 || has_mixed_frames(), "");

  if (!concurrent) {
    assert(closure._size <= size + argsize() + frame::metadata_words,
           "size: %d argsize: %d closure.size: %d end sp: " PTR_FORMAT " start sp: %d chunk size: %d",
           size, argsize(), closure._size, closure._sp - start_address(), sp(), stack_size());
    assert(argsize() == closure._argsize - (closure._num_frames > 0 ? frame::metadata_words_at_top : 0),
           "argsize(): %d closure.argsize: %d closure.callee_interpreted: %d",
           argsize(), closure._argsize, closure._callee_interpreted);

    int calculated_max_size = closure._size
                              + closure._num_i2c * frame::align_wiggle
                              + closure._num_interpreted_frames * frame::align_wiggle;
    assert(max_thawing_size() == calculated_max_size,
           "max_size(): %d calculated_max_size: %d argsize: %d num_i2c: %d",
           max_thawing_size(), calculated_max_size, closure._argsize, closure._num_i2c);

    if (out_size   != nullptr) *out_size   += size;
    if (out_oops   != nullptr) *out_oops   += closure._num_oops;
    if (out_frames != nullptr) *out_frames += closure._num_frames;
    if (out_interpreted_frames != nullptr) *out_interpreted_frames += closure._num_interpreted_frames;
  } else {
    assert(out_size == nullptr, "");
    assert(out_oops == nullptr, "");
    assert(out_frames == nullptr, "");
    assert(out_interpreted_frames == nullptr, "");
  }

  if (has_bitmap()) {
    assert(bitmap().size() == InstanceStackChunkKlass::bitmap_size_in_bits(stack_size()),
           "bitmap().size(): %zu stack_size: %d",
           bitmap().size(), stack_size());

    int oop_count;
    if (UseCompressedOops) {
      StackChunkVerifyBitmapClosure<narrowOop> bitmap_closure(this);
      bitmap().iterate(&bitmap_closure,
                       bit_index_for((narrowOop*)(sp_address() - frame::metadata_words_at_bottom)),
                       bit_index_for((narrowOop*)end_address()));
      oop_count = bitmap_closure._count;
    } else {
      StackChunkVerifyBitmapClosure<oop> bitmap_closure(this);
      bitmap().iterate(&bitmap_closure,
                       bit_index_for((oop*)(sp_address() - frame::metadata_words_at_bottom)),
                       bit_index_for((oop*)end_address()));
      oop_count = bitmap_closure._count;
    }
    assert(oop_count == closure._num_oops,
           "bitmap_closure._count: %d closure._num_oops: %d", oop_count, closure._num_oops);
  }

  return true;
}
#endif