/*
 * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetStackChunk.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/memRegion.hpp"
#include "oops/instanceStackChunkKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "runtime/frame.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "runtime/stackChunkFrameStream.inline.hpp"

// Note: Some functions in this file work with stale object pointers, e.g.
//       DerivedPointersSupport. Be extra careful not to put those pointers into
//       variables of the 'oop' type. There's extra GC verification around oops
//       that may fail when stale oops are being used.

template <typename RegisterMapT>
class FrameOopIterator : public OopIterator {
private:
  const frame& _f;
  const RegisterMapT* _map;

public:
  FrameOopIterator(const frame& f, const RegisterMapT* map)
    : _f(f),
      _map(map) {
  }

  virtual void oops_do(OopClosure* cl) override {
    if (_f.is_interpreted_frame()) {
      _f.oops_interpreted_do(cl, _map);
    } else {
      OopMapDo<OopClosure, DerivedOopClosure, IncludeAllValues> visitor(cl, nullptr);
      visitor.oops_do(&_f, _map, _f.oop_map());
    }
  }
};

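// Iterates the oops held in the chunk's lock-stack area, which is stored at
// the start of the chunk (see lockstack_size() and start_address() below).
// These oops must be visited just like frame oops when encoding/decoding the
// chunk for GC mode.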
class LockStackOopIterator : public OopIterator {
private:
  const stackChunkOop _chunk;
public:
  LockStackOopIterator(const stackChunkOop chunk) : _chunk(chunk) {}

  virtual void oops_do(OopClosure* cl) override {
    int cnt = _chunk->lockstack_size();
    oop* lockstack_start = (oop*)_chunk->start_address();
    for (int i = 0; i < cnt; i++) {
      cl->do_oop(&lockstack_start[i]);
    }
  }
};

frame stackChunkOopDesc::top_frame(RegisterMap* map) {
  assert(!is_empty(), "");
  StackChunkFrameStream<ChunkFrames::Mixed> fs(this);

  map->set_stack_chunk(this);
  fs.initialize_register_map(map);

  frame f = fs.to_frame();

  assert(to_offset(f.sp()) == sp(), "f.offset_sp(): %d sp(): %d async: %d", f.offset_sp(), sp(), map->is_async());
  relativize_frame(f);
  f.set_frame_index(0);
  return f;
}

frame stackChunkOopDesc::sender(const frame& f, RegisterMap* map) {
  assert(map->in_cont(), "");
  assert(!map->include_argument_oops(), "");
  assert(!f.is_empty(), "");
  assert(map->stack_chunk() == this, "");
  assert(!is_empty(), "");

  int index = f.frame_index(); // we need to capture the index before calling derelativize, which destroys it
  StackChunkFrameStream<ChunkFrames::Mixed> fs(this, derelativize(f));
  fs.next(map);

  if (!fs.is_done()) {
    frame sender = fs.to_frame();
    assert(is_usable_in_chunk(sender.unextended_sp()), "");
    relativize_frame(sender);

    sender.set_frame_index(index + 1);
    return sender;
  }

  if (parent() != nullptr) {
    assert(!parent()->is_empty(), "");
    return parent()->top_frame(map);
  }

  return Continuation::continuation_parent_frame(map);
}

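// Counts the Java frames represented by a single physical frame. For a
// compiled frame this walks the scope descriptors at the given pc, so each
// inlined method is counted as its own Java frame; an interpreted frame
// always counts as one (see the overload below).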
static int num_java_frames(nmethod* nm, address pc) {
  int count = 0;
  for (ScopeDesc* scope = nm->scope_desc_at(pc); scope != nullptr; scope = scope->sender()) {
    count++;
  }
  return count;
}

static int num_java_frames(const StackChunkFrameStream<ChunkFrames::Mixed>& f) {
  assert(f.is_interpreted()
         || (f.cb() != nullptr && f.cb()->is_nmethod() && f.cb()->as_nmethod()->is_java_method()), "");
  return f.is_interpreted() ? 1 : num_java_frames(f.cb()->as_nmethod(), f.orig_pc());
}

int stackChunkOopDesc::num_java_frames() const {
  int n = 0;
  for (StackChunkFrameStream<ChunkFrames::Mixed> f(const_cast<stackChunkOopDesc*>(this)); !f.is_done();
       f.next(SmallRegisterMap::instance_no_args())) {
    if (!f.is_stub()) {
      n += ::num_java_frames(f);
    }
  }
  return n;
}

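// Applies the requested barrier type (Load or Store) to every frame in the
// chunk by delegating to do_barriers0() below; used by
// stackChunkOopDesc::do_barriers().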
template <stackChunkOopDesc::BarrierType barrier>
class DoBarriersStackClosure {
  const stackChunkOop _chunk;

public:
  DoBarriersStackClosure(stackChunkOop chunk) : _chunk(chunk) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    _chunk->do_barriers0<barrier>(f, map);
    return true;
  }
};

template <stackChunkOopDesc::BarrierType barrier>
void stackChunkOopDesc::do_barriers() {
  DoBarriersStackClosure<barrier> closure(this);
  iterate_stack(&closure);
}

template void stackChunkOopDesc::do_barriers<stackChunkOopDesc::BarrierType::Load> ();
template void stackChunkOopDesc::do_barriers<stackChunkOopDesc::BarrierType::Store>();

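// Derived pointers (interior pointers into an object, produced by compiled
// code) are stored in the chunk relative to their base while the chunk is in
// GC mode, so that a moving GC can relocate the base without invalidating
// them. relativize() turns a derived pointer into an offset from its base
// (derived - base); derelativize() restores it (base + offset). For example,
// with base 0x1000 and derived pointer 0x1010, relativize() stores 0x10 in
// the derived slot and derelativize() later restores 0x1010. Both work on raw
// uintptr_t values because the base may be a stale oop (see the note at the
// top of this file).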
class DerivedPointersSupport {
public:
  static void relativize(derived_base* base_loc, derived_pointer* derived_loc) {
    // The base oop could be stale from the GC's point-of-view. Treat it as a
    // uintptr_t to stay clear of the oop verification code in oopsHierarchy.hpp.
    uintptr_t base = *(uintptr_t*)base_loc;
    if (base == 0) {
      return;
    }
    assert(!UseCompressedOops || !CompressedOops::is_base((void*)base), "");

    // This is always a full derived pointer
    uintptr_t derived_int_val = *(uintptr_t*)derived_loc;

    // Make the pointer an offset (relativize) and store it at the same location
    uintptr_t offset = derived_int_val - base;
    *(uintptr_t*)derived_loc = offset;
  }

  static void derelativize(derived_base* base_loc, derived_pointer* derived_loc) {
    uintptr_t base = *(uintptr_t*)base_loc;
    if (base == 0) {
      return;
    }
    assert(!UseCompressedOops || !CompressedOops::is_base((void*)base), "");

    // All derived pointers should have been relativized into offsets
    uintptr_t offset = *(uintptr_t*)derived_loc;

    // Restore the original derived pointer
    *(uintptr_t*)derived_loc = base + offset;
  }

  struct RelativizeClosure : public DerivedOopClosure {
    virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override {
      DerivedPointersSupport::relativize(base_loc, derived_loc);
    }
  };

  struct DerelativizeClosure : public DerivedOopClosure {
    virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override {
      DerivedPointersSupport::derelativize(base_loc, derived_loc);
    }
  };
};

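// Per-frame closure used during concurrent relativization: relativizes the
// frame's derived pointers and then has the GC's BarrierSetStackChunk encode
// the frame's oops (and, via do_lockstack(), the lock-stack oops) for GC mode.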
template <typename DerivedPointerClosureType>
class EncodeGCModeConcurrentFrameClosure {
  stackChunkOop _chunk;
  DerivedPointerClosureType* _cl;

public:
  EncodeGCModeConcurrentFrameClosure(stackChunkOop chunk, DerivedPointerClosureType* cl)
    : _chunk(chunk),
      _cl(cl) {
  }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    f.iterate_derived_pointers(_cl, map);

    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    frame fr = f.to_frame();
    FrameOopIterator<RegisterMapT> iterator(fr, map);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }

  bool do_lockstack() {
    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    LockStackOopIterator iterator(_chunk);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }
};

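// Multiple threads may attempt to relativize the same chunk's derived
// pointers concurrently. The protocol below uses three monotonic flags:
// FLAG_CLAIM_RELATIVIZE is set by the thread that wins the right to do the
// work, FLAG_NOTIFY_RELATIVIZE is set by a waiter that needs to be woken via
// ContinuationRelativize_lock, and FLAG_GC_MODE is the terminal state meaning
// relativization is complete. try_acquire_relativization() returns true only
// for the single claiming thread; every other caller either observes the
// terminal state or waits, and gets false.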
bool stackChunkOopDesc::try_acquire_relativization() {
  for (;;) {
    // We use an acquiring load when reading the flags to ensure that if we leave this
    // function thinking that relativization is finished, we know that if another thread
    // did the relativization, we will still be able to observe the relativized derived
    // pointers, which is important as subsequent modifications of derived pointers must
    // happen after relativization.
    uint8_t flags_before = flags_acquire();
    if ((flags_before & FLAG_GC_MODE) != 0) {
      // Terminal state - relativization is ensured
      return false;
    }

    if ((flags_before & FLAG_CLAIM_RELATIVIZE) != 0) {
      // Someone else has claimed relativization - wait for completion
      MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
      uint8_t flags_under_lock = flags_acquire();
      if ((flags_under_lock & FLAG_GC_MODE) != 0) {
        // Terminal state - relativization is ensured
        return false;
      }

      if ((flags_under_lock & FLAG_NOTIFY_RELATIVIZE) != 0) {
        // Relativization is claimed by another thread, which already knows it needs to notify
        ml.wait();
      } else if (try_set_flags(flags_under_lock, flags_under_lock | FLAG_NOTIFY_RELATIVIZE)) {
        // Relativization is claimed by another thread; we have now asked it to notify us
        ml.wait();
      }
      // Retry - rerun the loop
      continue;
    }

    if (try_set_flags(flags_before, flags_before | FLAG_CLAIM_RELATIVIZE)) {
      // Claimed relativization - let's do it
      return true;
    }
  }
}

void stackChunkOopDesc::release_relativization() {
  for (;;) {
    uint8_t flags_before = flags();
    if ((flags_before & FLAG_NOTIFY_RELATIVIZE) != 0) {
      MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
      // No need to CAS the terminal state; nobody else can be racing to mutate the
      // flags here, as both the claim and notify flags are already set (and monotonic).
      // We do however need to use a releasing store on the flags, to ensure that
      // the reader of that value (using load_acquire) will be able to observe
      // the relativization of the derived pointers.
      uint8_t flags_under_lock = flags();
      release_set_flags(flags_under_lock | FLAG_GC_MODE);
      ml.notify_all();
      return;
    }

    if (try_set_flags(flags_before, flags_before | FLAG_GC_MODE)) {
      // Successfully set the terminal state; we are done
      return;
    }
  }
}

void stackChunkOopDesc::relativize_derived_pointers_concurrently() {
  if (!try_acquire_relativization()) {
    // Already relativized
    return;
  }

  DerivedPointersSupport::RelativizeClosure derived_cl;
  EncodeGCModeConcurrentFrameClosure<decltype(derived_cl)> frame_cl(this, &derived_cl);
  iterate_stack(&frame_cl);
  frame_cl.do_lockstack();

  release_relativization();
}

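// Used by transform() below: relativizes each frame's derived pointers and
// encodes its oops (plus the lock-stack oops) for GC mode. Unlike the
// concurrent path above, transform() runs on a chunk that is not yet in GC
// mode and also sets up the oop bitmap.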
class TransformStackChunkClosure {
  stackChunkOop _chunk;

public:
  TransformStackChunkClosure(stackChunkOop chunk) : _chunk(chunk) { }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    DerivedPointersSupport::RelativizeClosure derived_cl;
    f.iterate_derived_pointers(&derived_cl, map);

    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    frame fr = f.to_frame();
    FrameOopIterator<RegisterMapT> iterator(fr, map);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }

  bool do_lockstack() {
    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    LockStackOopIterator iterator(_chunk);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }
};

void stackChunkOopDesc::transform() {
  assert(!is_gc_mode(), "Should only be called once per chunk");
  set_gc_mode(true);

  assert(!has_bitmap(), "Should only be set once");
  set_has_bitmap(true);
  bitmap().clear();

  TransformStackChunkClosure closure(this);
  iterate_stack(&closure);
  closure.do_lockstack();
}

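// Runs the GC access barriers on each oop slot in a frame. The HeapAccess
// load touches the slot through the GC's load barrier; for BarrierType::Store
// the value is stored back so the store barrier (e.g. card marking or SATB,
// depending on the GC) is applied as well. When the chunk has a bitmap and
// compressed oops are in use, the slots hold narrowOops and are accessed as such.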
template <stackChunkOopDesc::BarrierType barrier, bool compressedOopsWithBitmap>
class BarrierClosure: public OopClosure {
  NOT_PRODUCT(intptr_t* _sp;)

public:
  BarrierClosure(intptr_t* sp) NOT_PRODUCT(: _sp(sp)) {}

  virtual void do_oop(oop* p)       override { compressedOopsWithBitmap ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
  virtual void do_oop(narrowOop* p) override { do_oop_work(p); }

  template <class T> inline void do_oop_work(T* p) {
    oop value = (oop)HeapAccess<>::oop_load(p);
    if (barrier == stackChunkOopDesc::BarrierType::Store) {
      HeapAccess<>::oop_store(p, value);
    }
  }
};

template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
void stackChunkOopDesc::do_barriers0(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
  // We need to invoke the write barriers so as not to miss oops in old chunks that haven't yet been concurrently scanned
  assert(!f.is_done(), "");

  if (f.is_interpreted()) {
    Method* m = f.to_frame().interpreter_frame_method();
    // Class redefinition support
    m->record_gc_epoch();
  } else if (f.is_compiled()) {
    nmethod* nm = f.cb()->as_nmethod();
    // The entry barrier takes care of having the right synchronization
    // when keeping the nmethod alive during concurrent execution.
    nm->run_nmethod_entry_barrier();
    // There is no need to mark the Method, as class redefinition will walk the
    // CodeCache and note the nmethods' Methods
  }

  if (has_bitmap() && UseCompressedOops) {
    BarrierClosure<barrier, true> oops_closure(f.sp());
    f.iterate_oops(&oops_closure, map);
  } else {
    BarrierClosure<barrier, false> oops_closure(f.sp());
    f.iterate_oops(&oops_closure, map);
  }
}

template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMapNoArgs* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMapNoArgs* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMapNoArgs* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMapNoArgs* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMapWithArgs* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMapWithArgs* map);

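// Called on a frame that has been copied (thawed) from the chunk back onto a
// thread stack: decodes the frame's oops out of GC mode and turns relativized
// derived pointers back into real addresses. A no-op for chunks that are not
// in GC mode and do not require barriers.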
template <typename RegisterMapT>
void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMapT* map) {
  if (!(is_gc_mode() || requires_barriers())) {
    return;
  }

  BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
  FrameOopIterator<RegisterMapT> iterator(f, map);
  bs_chunk->decode_gc_mode(this, &iterator);

  if (f.is_compiled_frame() && f.oop_map()->has_derived_oops()) {
    DerivedPointersSupport::DerelativizeClosure derived_closure;
    OopMapDo<OopClosure, DerivedPointersSupport::DerelativizeClosure, SkipNullValue> visitor(nullptr, &derived_closure);
    visitor.oops_do(&f, map, f.oop_map());
  }
}

template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMap* map);
template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMapNoArgs* map);
template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMapWithArgs* map);

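// Copies the lock-stack oops out of the chunk into dst (the caller provides
// the destination, e.g. the owning thread's lock stack). If the chunk
// requires GC barriers, entries are read and cleared through HeapAccess so
// the proper load/store barriers run; otherwise they are read directly.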
void stackChunkOopDesc::transfer_lockstack(oop* dst, bool requires_barriers) {
  const bool requires_gc_barriers = is_gc_mode() || requires_barriers;
  const bool requires_uncompress = has_bitmap() && UseCompressedOops;
  const auto load_and_clear_obj = [&](intptr_t* at) -> oop {
    if (requires_gc_barriers) {
      if (requires_uncompress) {
        oop value = HeapAccess<>::oop_load(reinterpret_cast<narrowOop*>(at));
        HeapAccess<>::oop_store(reinterpret_cast<narrowOop*>(at), nullptr);
        return value;
      } else {
        oop value = HeapAccess<>::oop_load(reinterpret_cast<oop*>(at));
        HeapAccess<>::oop_store(reinterpret_cast<oop*>(at), nullptr);
        return value;
      }
    } else {
      oop value = *reinterpret_cast<oop*>(at);
      return value;
    }
  };

  const int cnt = lockstack_size();
  intptr_t* lockstack_start = start_address();
  for (int i = 0; i < cnt; i++) {
    oop mon_owner = load_and_clear_obj(&lockstack_start[i]);
    assert(oopDesc::is_oop(mon_owner), "not an oop");
    dst[i] = mon_owner;
  }
}

void stackChunkOopDesc::print_on(bool verbose, outputStream* st) const {
  if (*((juint*)this) == badHeapWordVal) {
    st->print_cr("BAD WORD");
  } else {
    InstanceStackChunkKlass::print_chunk(const_cast<stackChunkOopDesc*>(this), verbose, st);
  }
}

#ifdef ASSERT

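// Debug-only verification: the closures below walk every frame, oop and
// bitmap bit in the chunk and cross-check them against each other and against
// the chunk's cached metadata (sp, argsize, max_thawing_size, ...).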
class StackChunkVerifyOopsClosure : public OopClosure {
  stackChunkOop _chunk;
  int _count;

public:
  StackChunkVerifyOopsClosure(stackChunkOop chunk)
    : _chunk(chunk), _count(0) {}

  void do_oop(oop* p) override { (_chunk->has_bitmap() && UseCompressedOops) ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
  void do_oop(narrowOop* p) override { do_oop_work(p); }

  template <typename T> inline void do_oop_work(T* p) {
    _count++;
    oop obj = _chunk->load_oop(p);
    assert(obj == nullptr || dbg_is_good_oop(obj), "p: " PTR_FORMAT " obj: " PTR_FORMAT, p2i(p), p2i(obj));
    if (_chunk->has_bitmap()) {
      BitMap::idx_t index = _chunk->bit_index_for(p);
      assert(_chunk->bitmap().at(index), "Bit not set at index %zu corresponding to " PTR_FORMAT, index, p2i(p));
    }
  }

  int count() const { return _count; }
};

class VerifyStackChunkFrameClosure {
  stackChunkOop _chunk;

public:
  intptr_t* _sp;
  CodeBlob* _cb;
  bool _callee_interpreted;
  int _size;
  int _argsize;
  int _num_oops;
  int _num_frames;
  int _num_interpreted_frames;
  int _num_i2c;

  VerifyStackChunkFrameClosure(stackChunkOop chunk)
    : _chunk(chunk), _sp(nullptr), _cb(nullptr), _callee_interpreted(false),
      _size(0), _argsize(0), _num_oops(0), _num_frames(0), _num_interpreted_frames(0), _num_i2c(0) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    _sp = f.sp();
    _cb = f.cb();

    int fsize = f.frame_size() - ((f.is_interpreted() == _callee_interpreted) ? _argsize : 0);
    int num_oops = f.num_oops(map);
    assert(num_oops >= 0, "");

    _argsize   = f.stack_argsize() + frame::metadata_words_at_top;
    _size     += fsize;
    _num_oops += num_oops;
    if (f.is_interpreted()) {
      _num_interpreted_frames++;
    }

    log_develop_trace(continuations)("debug_verify_stack_chunk frame: %d sp: " INTPTR_FORMAT " pc: " PTR_FORMAT " interpreted: %d size: %d argsize: %d oops: %d", _num_frames, f.sp() - _chunk->start_address(), p2i(f.pc()), f.is_interpreted(), fsize, _argsize, num_oops);
    LogTarget(Trace, continuations) lt;
    if (lt.develop_is_enabled()) {
      LogStream ls(lt);
      f.print_on(&ls);
    }
    assert(f.pc() != nullptr,
           "young: %d num_frames: %d sp: " PTR_FORMAT " start: " PTR_FORMAT " end: " PTR_FORMAT,
           !_chunk->requires_barriers(), _num_frames, p2i(f.sp()), p2i(_chunk->start_address()), p2i(_chunk->bottom_address()));

    if (_num_frames == 0) {
      assert(f.pc() == _chunk->pc(), "");
    }

    if (_num_frames > 0 && !_callee_interpreted && f.is_interpreted()) {
      log_develop_trace(continuations)("debug_verify_stack_chunk i2c");
      _num_i2c++;
    }

    StackChunkVerifyOopsClosure oops_closure(_chunk);
    f.iterate_oops(&oops_closure, map);
    assert(oops_closure.count() == num_oops, "oops: %d oopmap->num_oops(): %d", oops_closure.count(), num_oops);

    _callee_interpreted = f.is_interpreted();
    _num_frames++;
    return true;
  }
};

template <typename T>
class StackChunkVerifyBitmapClosure : public BitMapClosure {
  stackChunkOop _chunk;

public:
  int _count;

  StackChunkVerifyBitmapClosure(stackChunkOop chunk) : _chunk(chunk), _count(0) {}

  bool do_bit(BitMap::idx_t index) override {
    T* p = _chunk->address_for_bit<T>(index);
    _count++;

    oop obj = _chunk->load_oop(p);
    assert(obj == nullptr || dbg_is_good_oop(obj),
           "p: " PTR_FORMAT " obj: " PTR_FORMAT " index: %zu",
           p2i(p), p2i((oopDesc*)obj), index);

    return true; // continue processing
  }
};

bool stackChunkOopDesc::verify(size_t* out_size, int* out_oops, int* out_frames, int* out_interpreted_frames) {
  DEBUG_ONLY(if (!VerifyContinuations) return true;)

  assert(oopDesc::is_oop(this), "");

  assert(stack_size() >= 0, "");
  assert(!has_bitmap() || is_gc_mode(), "");

  if (is_empty()) {
    assert(max_thawing_size() == 0, "");
  } else {
    assert(argsize() >= 0, "");
  }

  assert(oopDesc::is_oop_or_null(parent()), "");

  const bool concurrent = !Thread::current()->is_Java_thread();

  // If argsize == 0 and the chunk isn't mixed, the chunk contains the metadata (pc, fp -- frame::sender_sp_offset)
  // for the top frame (below sp), and *not* for the bottom frame.
  int size = bottom() - sp();
  assert(size >= 0, "");
  assert((size == 0) == is_empty(), "");

  const StackChunkFrameStream<ChunkFrames::Mixed> first(this);

  VerifyStackChunkFrameClosure closure(this);
  iterate_stack(&closure);

  assert(!is_empty() || closure._cb == nullptr, "");
  if (closure._cb != nullptr && closure._cb->is_nmethod()) {
    assert(argsize() ==
      (closure._cb->as_nmethod()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >> LogBytesPerWord,
      "chunk argsize: %d bottom frame argsize: %d", argsize(),
      (closure._cb->as_nmethod()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >> LogBytesPerWord);
  }

  assert(closure._num_interpreted_frames == 0 || has_mixed_frames(), "");

  if (!concurrent) {
    assert(closure._size <= size + (stack_size() - bottom()),
           "size: %d bottom: %d closure.size: %d end sp: " PTR_FORMAT " start sp: %d chunk size: %d",
           size, bottom(), closure._size, closure._sp - start_address(), sp(), stack_size());
    if (closure._num_frames > 0) {
      assert(closure._argsize >= frame::metadata_words_at_top, "should be set up");
      assert(argsize() == closure._argsize - frame::metadata_words_at_top,
             "argsize(): %d closure.argsize: %d closure.callee_interpreted: %d",
             argsize(), closure._argsize, closure._callee_interpreted);
    }

    int calculated_max_size = closure._size
                              + closure._num_i2c * frame::align_wiggle
                              + closure._num_interpreted_frames * frame::align_wiggle;
    assert(max_thawing_size() == calculated_max_size,
           "max_size(): %d calculated_max_size: %d argsize: %d num_i2c: %d",
           max_thawing_size(), calculated_max_size, closure._argsize, closure._num_i2c);

    if (out_size   != nullptr) *out_size   += size;
    if (out_oops   != nullptr) *out_oops   += closure._num_oops;
    if (out_frames != nullptr) *out_frames += closure._num_frames;
    if (out_interpreted_frames != nullptr) *out_interpreted_frames += closure._num_interpreted_frames;
  } else {
    assert(out_size == nullptr, "");
    assert(out_oops == nullptr, "");
    assert(out_frames == nullptr, "");
    assert(out_interpreted_frames == nullptr, "");
  }

  if (has_bitmap()) {
    assert(bitmap().size() == InstanceStackChunkKlass::bitmap_size_in_bits(stack_size()),
           "bitmap().size(): %zu stack_size: %d",
           bitmap().size(), stack_size());

    int oop_count;
    if (UseCompressedOops) {
      StackChunkVerifyBitmapClosure<narrowOop> bitmap_closure(this);
      bitmap().iterate(&bitmap_closure,
                       bit_index_for((narrowOop*)(sp_address() - frame::metadata_words_at_bottom)),
                       bit_index_for((narrowOop*)end_address()));
      oop_count = bitmap_closure._count;
    } else {
      StackChunkVerifyBitmapClosure<oop> bitmap_closure(this);
      bitmap().iterate(&bitmap_closure,
                       bit_index_for((oop*)(sp_address() - frame::metadata_words_at_bottom)),
                       bit_index_for((oop*)end_address()));
      oop_count = bitmap_closure._count;
    }
    assert(oop_count == closure._num_oops,
           "bitmap_closure._count: %d closure._num_oops: %d", oop_count, closure._num_oops);
  }

  return true;
}
#endif