1 /*
  2  * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "code/nmethod.hpp"
 27 #include "code/scopeDesc.hpp"
 28 #include "gc/shared/barrierSet.hpp"
 29 #include "gc/shared/barrierSetStackChunk.hpp"
 30 #include "logging/log.hpp"
 31 #include "logging/logStream.hpp"
 32 #include "memory/memRegion.hpp"
 33 #include "oops/instanceStackChunkKlass.inline.hpp"
 34 #include "oops/oop.inline.hpp"
 35 #include "oops/stackChunkOop.inline.hpp"
 36 #include "runtime/frame.hpp"
 37 #include "runtime/registerMap.hpp"
 38 #include "runtime/smallRegisterMap.inline.hpp"
 39 #include "runtime/stackChunkFrameStream.inline.hpp"
 40 
 41 // Note: Some functions in this file work with stale object pointers, e.g.
 42 //       DerivedPointerSupport. Be extra careful to not put those pointers into
 43 //       variables of the 'oop' type. There's extra GC verification around oops
 44 //       that may fail when stale oops are being used.
 45 
 46 template <typename RegisterMapT>
 47 class FrameOopIterator : public OopIterator {
 48 private:
 49   const frame& _f;
 50   const RegisterMapT* _map;
 51 
 52 public:
 53   FrameOopIterator(const frame& f, const RegisterMapT* map)
 54     : _f(f),
 55       _map(map) {
 56   }
 57 
 58   virtual void oops_do(OopClosure* cl) override {
 59     if (_f.is_interpreted_frame()) {
 60       _f.oops_interpreted_do(cl, nullptr);
 61     } else {
 62       OopMapDo<OopClosure, DerivedOopClosure, IncludeAllValues> visitor(cl, nullptr);
 63       visitor.oops_do(&_f, _map, _f.oop_map());
 64     }
 65   }
 66 };
 67 















// Returns the topmost frame of this (non-empty) chunk and initializes 'map'
// so it can be used to walk the rest of the chunk's stack via sender().
frame stackChunkOopDesc::top_frame(RegisterMap* map) {
  assert(!is_empty(), "");
  StackChunkFrameStream<ChunkFrames::Mixed> fs(this);

  map->set_stack_chunk(this);
  fs.initialize_register_map(map);

  frame f = fs.to_frame();

  assert(to_offset(f.sp()) == sp(), "f.offset_sp(): %d sp(): %d async: %d", f.offset_sp(), sp(), map->is_async());
  // Hand out chunk-relative offsets rather than raw addresses: the chunk is a
  // heap object and may be moved by the GC.
  relativize_frame(f);
  f.set_frame_index(0); // top frame has index 0; sender() increments from here
  return f;
}
 82 
// Returns the sender (caller) of frame 'f' within the continuation. When 'f'
// is the bottom frame of this chunk, the walk continues into the parent
// chunk if there is one, otherwise into the continuation's parent frame.
frame stackChunkOopDesc::sender(const frame& f, RegisterMap* map) {
  assert(map->in_cont(), "");
  assert(!map->include_argument_oops(), "");
  assert(!f.is_empty(), "");
  assert(map->stack_chunk() == this, "");
  assert(!is_empty(), "");

  int index = f.frame_index(); // we need to capture the index before calling derelativize, which destroys it
  StackChunkFrameStream<ChunkFrames::Mixed> fs(this, derelativize(f));
  fs.next(map);

  if (!fs.is_done()) {
    frame sender = fs.to_frame();
    assert(is_usable_in_chunk(sender.unextended_sp()), "");
    // Frames handed out to callers use chunk-relative offsets (the chunk can move).
    relativize_frame(sender);

    sender.set_frame_index(index+1);
    return sender;
  }

  // Reached the bottom of this chunk; continue in the parent chunk, if any.
  if (parent() != nullptr) {
    assert(!parent()->is_empty(), "");
    return parent()->top_frame(map);
  }

  return Continuation::continuation_parent_frame(map);
}
110 
111 static int num_java_frames(nmethod* nm, address pc) {
112   int count = 0;
113   for (ScopeDesc* scope = nm->scope_desc_at(pc); scope != nullptr; scope = scope->sender()) {
114     count++;
115   }
116   return count;
117 }
118 
119 static int num_java_frames(const StackChunkFrameStream<ChunkFrames::Mixed>& f) {
120   assert(f.is_interpreted()
121          || (f.cb() != nullptr && f.cb()->is_nmethod() && f.cb()->as_nmethod()->is_java_method()), "");
122   return f.is_interpreted() ? 1 : num_java_frames(f.cb()->as_nmethod(), f.orig_pc());
123 }
124 
125 int stackChunkOopDesc::num_java_frames() const {
126   int n = 0;
127   for (StackChunkFrameStream<ChunkFrames::Mixed> f(const_cast<stackChunkOopDesc*>(this)); !f.is_done();
128        f.next(SmallRegisterMap::instance())) {
129     if (!f.is_stub()) {
130       n += ::num_java_frames(f);
131     }
132   }
133   return n;
134 }
135 
// Stack-walking closure that applies do_barriers0 (load or store barriers,
// selected by the 'barrier' template parameter) to every frame of the chunk.
template <stackChunkOopDesc::BarrierType barrier>
class DoBarriersStackClosure {
  const stackChunkOop _chunk;

public:
  DoBarriersStackClosure(stackChunkOop chunk) : _chunk(chunk) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    _chunk->do_barriers0<barrier>(f, map);
    return true; // keep iterating
  }
};
149 
// Runs GC access barriers over the oops of every frame in this chunk.
template <stackChunkOopDesc::BarrierType barrier>
void stackChunkOopDesc::do_barriers() {
  DoBarriersStackClosure<barrier> closure(this);
  iterate_stack(&closure);
}

template void stackChunkOopDesc::do_barriers<stackChunkOopDesc::BarrierType::Load> ();
template void stackChunkOopDesc::do_barriers<stackChunkOopDesc::BarrierType::Store>();
158 
159 class DerivedPointersSupport {
160 public:
161   static void relativize(derived_base* base_loc, derived_pointer* derived_loc) {
162     // The base oop could be stale from the GC's point-of-view. Treat it as an
163     // uintptr_t to stay clear of the oop verification code in oopsHierarcy.hpp.
164     uintptr_t base = *(uintptr_t*)base_loc;
165     if (base == 0) {
166       return;
167     }
168     assert(!UseCompressedOops || !CompressedOops::is_base((void*)base), "");
169 
170     // This is always a full derived pointer
171     uintptr_t derived_int_val = *(uintptr_t*)derived_loc;
172 
173     // Make the pointer an offset (relativize) and store it at the same location
174     uintptr_t offset = derived_int_val - base;
175     *(uintptr_t*)derived_loc = offset;
176   }
177 
178   static void derelativize(derived_base* base_loc, derived_pointer* derived_loc) {
179     uintptr_t base = *(uintptr_t*)base_loc;
180     if (base == 0) {
181       return;
182     }
183     assert(!UseCompressedOops || !CompressedOops::is_base((void*)base), "");
184 
185     // All derived pointers should have been relativized into offsets
186     uintptr_t offset = *(uintptr_t*)derived_loc;
187 
188     // Restore the original derived pointer
189     *(uintptr_t*)derived_loc = base + offset;
190   }
191 
192   struct RelativizeClosure : public DerivedOopClosure {
193     virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override {
194       DerivedPointersSupport::relativize(base_loc, derived_loc);
195     }
196   };
197 
198   struct DerelativizeClosure : public DerivedOopClosure {
199     virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override {
200       DerivedPointersSupport::derelativize(base_loc, derived_loc);
201     }
202   };
203 };
204 
// Frame closure used during concurrent relativization: first relativizes the
// frame's derived pointers via '_cl', then lets the GC's barrier set encode
// the frame's oops for GC mode.
template <typename DerivedPointerClosureType>
class EncodeGCModeConcurrentFrameClosure {
  stackChunkOop _chunk;
  DerivedPointerClosureType* _cl;

public:
  EncodeGCModeConcurrentFrameClosure(stackChunkOop chunk, DerivedPointerClosureType* cl)
    : _chunk(chunk),
      _cl(cl) {
  }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    // Derived pointers must be relativized before their base oops are encoded.
    f.iterate_derived_pointers(_cl, map);

    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    frame fr = f.to_frame();
    FrameOopIterator<RegisterMapT> iterator(fr, map);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true; // continue to the next frame
  }
};
228 
// Tries to claim the right to relativize this chunk's derived pointers.
// Returns true if the caller won the claim (and must later call
// release_relativization); returns false once relativization has been
// ensured, possibly after waiting for the claiming thread to finish.
bool stackChunkOopDesc::try_acquire_relativization() {
  for (;;) {
    // We use an acquiring load when reading the flags to ensure that if we leave this
    // function thinking that relativization is finished, we know that if another thread
    // did the relativization, we will still be able to observe the relativized derived
    // pointers, which is important as subsequent modifications of derived pointers must
    // happen after relativization.
    uint8_t flags_before = flags_acquire();
    if ((flags_before & FLAG_GC_MODE) != 0) {
      // Terminal state - relativization is ensured
      return false;
    }

    if ((flags_before & FLAG_CLAIM_RELATIVIZE) != 0) {
      // Someone else has claimed relativization - wait for completion
      MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
      uint8_t flags_under_lock = flags_acquire();
      if ((flags_under_lock & FLAG_GC_MODE) != 0) {
        // Terminal state - relativization is ensured
        return false;
      }

      if ((flags_under_lock & FLAG_NOTIFY_RELATIVIZE) != 0) {
        // Relativization is claimed by another thread, and it knows it needs to notify
        ml.wait();
      } else if (try_set_flags(flags_under_lock, flags_under_lock | FLAG_NOTIFY_RELATIVIZE)) {
        // Relativization is claimed by another thread, and it knows it needs to notify
        ml.wait();
      }
      // Retry - rerun the loop
      continue;
    }

    if (try_set_flags(flags_before, flags_before | FLAG_CLAIM_RELATIVIZE)) {
      // Claimed relativization - let's do it
      return true;
    }
  }
}
268 
// Publishes the completion of relativization by setting the terminal
// FLAG_GC_MODE state, waking up any threads that are waiting in
// try_acquire_relativization.
void stackChunkOopDesc::release_relativization() {
  for (;;) {
    uint8_t flags_before = flags();
    if ((flags_before & FLAG_NOTIFY_RELATIVIZE) != 0) {
      MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
      // No need to CAS the terminal state; nobody else can be racingly mutating here
      // as both claim and notify flags are already set (and monotonic)
      // We do however need to use a releasing store on the flags, to ensure that
      // the reader of that value (using load_acquire) will be able to observe
      // the relativization of the derived pointers
      uint8_t flags_under_lock = flags();
      release_set_flags(flags_under_lock | FLAG_GC_MODE);
      ml.notify_all();
      return;
    }

    if (try_set_flags(flags_before, flags_before | FLAG_GC_MODE)) {
      // Successfully set the terminal state; we are done
      return;
    }
  }
}
291 
// Relativizes all derived pointers in the chunk, racing safely with other
// threads attempting the same: exactly one thread performs the work while
// the others wait for (or observe) completion.
void stackChunkOopDesc::relativize_derived_pointers_concurrently() {
  if (!try_acquire_relativization()) {
    // Already relativized
    return;
  }

  DerivedPointersSupport::RelativizeClosure derived_cl;
  EncodeGCModeConcurrentFrameClosure<decltype(derived_cl)> frame_cl(this, &derived_cl);
  iterate_stack(&frame_cl);

  release_relativization();
}
304 
// Frame closure for transform(): relativizes each frame's derived pointers
// and then encodes the frame's oops via the GC barrier set.
class TransformStackChunkClosure {
  stackChunkOop _chunk;

public:
  TransformStackChunkClosure(stackChunkOop chunk) : _chunk(chunk) { }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    // Derived pointers must be relativized before their base oops are encoded.
    DerivedPointersSupport::RelativizeClosure derived_cl;
    f.iterate_derived_pointers(&derived_cl, map);

    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    frame fr = f.to_frame();
    FrameOopIterator<RegisterMapT> iterator(fr, map);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true; // continue to the next frame
  }
};
324 
// Puts the chunk into GC mode: sets up the (cleared) oop bitmap and encodes
// all frames via TransformStackChunkClosure. Called at most once per chunk.
void stackChunkOopDesc::transform() {
  assert(!is_gc_mode(), "Should only be called once per chunk");
  set_gc_mode(true);

  assert(!has_bitmap(), "Should only be set once");
  set_has_bitmap(true);
  bitmap().clear();

  TransformStackChunkClosure closure(this);
  iterate_stack(&closure);
}
336 
// Oop closure that runs GC access barriers on each oop location: the load
// (via HeapAccess::oop_load) always happens; when 'barrier' is Store, the
// value is additionally stored back unchanged to trigger the store barrier.
template <stackChunkOopDesc::BarrierType barrier, bool compressedOopsWithBitmap>
class BarrierClosure: public OopClosure {
  NOT_PRODUCT(intptr_t* _sp;)  // retained for debugging in non-product builds

public:
  BarrierClosure(intptr_t* sp) NOT_PRODUCT(: _sp(sp)) {}

  // With a bitmap and compressed oops, locations reported as oop* actually hold narrowOops.
  virtual void do_oop(oop* p)       override { compressedOopsWithBitmap ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
  virtual void do_oop(narrowOop* p) override { do_oop_work(p); }

  template <class T> inline void do_oop_work(T* p) {
    oop value = (oop)HeapAccess<>::oop_load(p);
    if (barrier == stackChunkOopDesc::BarrierType::Store) {
      HeapAccess<>::oop_store(p, value);
    }
  }
};
354 
// Runs GC barriers over a single frame: keeps the frame's Method/nmethod
// alive for class redefinition / code-cache purposes, then applies the
// requested barrier to every oop location in the frame.
template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
void stackChunkOopDesc::do_barriers0(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
  // We need to invoke the write barriers so as not to miss oops in old chunks that haven't yet been concurrently scanned
  assert (!f.is_done(), "");

  if (f.is_interpreted()) {
    Method* m = f.to_frame().interpreter_frame_method();
    // Class redefinition support
    m->record_gc_epoch();
  } else if (f.is_compiled()) {
    nmethod* nm = f.cb()->as_nmethod();
    // The entry barrier takes care of having the right synchronization
    // when keeping the nmethod alive during concurrent execution.
    nm->run_nmethod_entry_barrier();
    // There is no need to mark the Method, as class redefinition will walk the
    // CodeCache, noting their Methods
  }

  // With a bitmap and compressed oops, oop slots hold narrowOops (see BarrierClosure).
  if (has_bitmap() && UseCompressedOops) {
    BarrierClosure<barrier, true> oops_closure(f.sp());
    f.iterate_oops(&oops_closure, map);
  } else {
    BarrierClosure<barrier, false> oops_closure(f.sp());
    f.iterate_oops(&oops_closure, map);
  }
}

template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMap* map);
390 
// Fixes up a frame that has been thawed (copied) out of this chunk onto a
// thread stack: decodes its oops back out of GC mode and converts any
// relativized derived pointers back to absolute pointers. A no-op if the
// chunk never entered GC mode and does not require barriers.
template <typename RegisterMapT>
void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMapT* map) {
  if (!(is_gc_mode() || requires_barriers())) {
    return;
  }

  BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
  FrameOopIterator<RegisterMapT> iterator(f, map);
  bs_chunk->decode_gc_mode(this, &iterator);

  // Only compiled frames carry derived pointers described by the oop map.
  if (f.is_compiled_frame() && f.oop_map()->has_derived_oops()) {
    DerivedPointersSupport::DerelativizeClosure derived_closure;
    OopMapDo<OopClosure, DerivedPointersSupport::DerelativizeClosure, SkipNullValue> visitor(nullptr, &derived_closure);
    visitor.oops_do(&f, map, f.oop_map());
  }
}

template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMap* map);
template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMap* map);
410 






























411 void stackChunkOopDesc::print_on(bool verbose, outputStream* st) const {
412   if (*((juint*)this) == badHeapWordVal) {
413     st->print_cr("BAD WORD");
414   } else {
415     InstanceStackChunkKlass::print_chunk(const_cast<stackChunkOopDesc*>(this), verbose, st);
416   }
417 }
418 
419 #ifdef ASSERT
420 
// Debug-only closure that checks each oop location in the chunk: the oop
// must be null or well-formed, and when the chunk has a bitmap, the bit for
// the location must be set. Also counts the locations visited.
class StackChunkVerifyOopsClosure : public OopClosure {
  stackChunkOop _chunk;
  int _count;   // number of oop locations visited

public:
  StackChunkVerifyOopsClosure(stackChunkOop chunk)
    : _chunk(chunk), _count(0) {}

  // With a bitmap and compressed oops, locations reported as oop* actually hold narrowOops.
  void do_oop(oop* p) override { (_chunk->has_bitmap() && UseCompressedOops) ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
  void do_oop(narrowOop* p) override { do_oop_work(p); }

  template <typename T> inline void do_oop_work(T* p) {
    _count++;
    oop obj = _chunk->load_oop(p);
    assert(obj == nullptr || dbg_is_good_oop(obj), "p: " PTR_FORMAT " obj: " PTR_FORMAT, p2i(p), p2i(obj));
    if (_chunk->has_bitmap()) {
      BitMap::idx_t index = _chunk->bit_index_for(p);
      assert(_chunk->bitmap().at(index), "Bit not set at index " SIZE_FORMAT " corresponding to " PTR_FORMAT, index, p2i(p));
    }
  }

  int count() const { return _count; }
};
444 
// Debug-only frame closure that accumulates per-chunk statistics (sizes,
// oop counts, frame counts) while sanity-checking each frame; the results
// are cross-checked against the chunk's metadata in verify().
class VerifyStackChunkFrameClosure {
  stackChunkOop _chunk;

public:
  intptr_t* _sp;              // sp of the last frame visited
  CodeBlob* _cb;              // code blob of the last frame visited
  bool _callee_interpreted;   // was the previously visited (callee) frame interpreted?
  int _size;                  // accumulated frame sizes, in words
  int _argsize;               // stack argsize of the last visited frame (plus metadata)
  int _num_oops;
  int _num_frames;
  int _num_interpreted_frames;
  int _num_i2c;               // interpreted->compiled transitions observed

  VerifyStackChunkFrameClosure(stackChunkOop chunk, int num_frames, int size)
    : _chunk(chunk), _sp(nullptr), _cb(nullptr), _callee_interpreted(false),
      _size(size), _argsize(0), _num_oops(0), _num_frames(num_frames), _num_interpreted_frames(0), _num_i2c(0) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    _sp = f.sp();
    _cb = f.cb();

    // Avoid double-counting the argument area shared with the callee when
    // caller and callee are of the same kind.
    int fsize = f.frame_size() - ((f.is_interpreted() == _callee_interpreted) ? _argsize : 0);
    int num_oops = f.num_oops();
    assert(num_oops >= 0, "");

    _argsize   = f.stack_argsize() + frame::metadata_words_at_top;
    _size     += fsize;
    _num_oops += num_oops;
    if (f.is_interpreted()) {
      _num_interpreted_frames++;
    }

    log_develop_trace(continuations)("debug_verify_stack_chunk frame: %d sp: " INTPTR_FORMAT " pc: " PTR_FORMAT " interpreted: %d size: %d argsize: %d oops: %d", _num_frames, f.sp() - _chunk->start_address(), p2i(f.pc()), f.is_interpreted(), fsize, _argsize, num_oops);
    LogTarget(Trace, continuations) lt;
    if (lt.develop_is_enabled()) {
      LogStream ls(lt);
      f.print_on(&ls);
    }
    assert(f.pc() != nullptr,
           "young: %d num_frames: %d sp: " PTR_FORMAT " start: " PTR_FORMAT " end: " PTR_FORMAT,
           !_chunk->requires_barriers(), _num_frames, p2i(f.sp()), p2i(_chunk->start_address()), p2i(_chunk->bottom_address()));

    if (_num_frames == 0) {
      assert(f.pc() == _chunk->pc(), "");
    }

    // A compiled callee below an interpreted caller implies an i2c transition.
    if (_num_frames > 0 && !_callee_interpreted && f.is_interpreted()) {
      log_develop_trace(continuations)("debug_verify_stack_chunk i2c");
      _num_i2c++;
    }

    StackChunkVerifyOopsClosure oops_closure(_chunk);
    f.iterate_oops(&oops_closure, map);
    assert(oops_closure.count() == num_oops, "oops: %d oopmap->num_oops(): %d", oops_closure.count(), num_oops);

    _callee_interpreted = f.is_interpreted();
    _num_frames++;
    return true;
  }
};
507 
// Debug-only bitmap closure: for every set bit, checks that the oop at the
// corresponding address is null or well-formed, and counts the set bits.
// T is narrowOop or oop, matching the chunk's slot encoding.
template <typename T>
class StackChunkVerifyBitmapClosure : public BitMapClosure {
  stackChunkOop _chunk;

public:
  int _count;   // number of set bits (oop locations) visited

  StackChunkVerifyBitmapClosure(stackChunkOop chunk) : _chunk(chunk), _count(0) {}

  bool do_bit(BitMap::idx_t index) override {
    T* p = _chunk->address_for_bit<T>(index);
    _count++;

    oop obj = _chunk->load_oop(p);
    assert(obj == nullptr || dbg_is_good_oop(obj),
           "p: " PTR_FORMAT " obj: " PTR_FORMAT " index: " SIZE_FORMAT,
           p2i(p), p2i((oopDesc*)obj), index);

    return true; // continue processing
  }
};
529 
// Debug-only consistency check of the whole chunk: walks all frames and
// verifies sizes, argsize, oop counts and (when present) the oop bitmap
// against the chunk's metadata. Optionally accumulates statistics into the
// out parameters; those must be null when called concurrently (i.e. from a
// non-Java thread), since the chunk may be mutated under us.
bool stackChunkOopDesc::verify(size_t* out_size, int* out_oops, int* out_frames, int* out_interpreted_frames) {
  DEBUG_ONLY(if (!VerifyContinuations) return true;)

  assert(oopDesc::is_oop(this), "");

  assert(stack_size() >= 0, "");
  assert(!has_bitmap() || is_gc_mode(), "");

  if (is_empty()) {
    assert(max_thawing_size() == 0, "");
  } else {
    assert(argsize() >= 0, "");
  }

  assert(oopDesc::is_oop_or_null(parent()), "");

  const bool concurrent = !Thread::current()->is_Java_thread();

  // If argsize == 0 and the chunk isn't mixed, the chunk contains the metadata (pc, fp -- frame::sender_sp_offset)
  // for the top frame (below sp), and *not* for the bottom frame.
  int size = bottom() - sp();
  assert(size >= 0, "");
  assert((size == 0) == is_empty(), "");

  const StackChunkFrameStream<ChunkFrames::Mixed> first(this);
  const bool has_safepoint_stub_frame = first.is_stub();

  // Pre-account for the safepoint stub frame, since the iteration skips it.
  VerifyStackChunkFrameClosure closure(this,
                                       has_safepoint_stub_frame ? 1 : 0, // Iterate_stack skips the safepoint stub
                                       has_safepoint_stub_frame ? first.frame_size() : 0);
  iterate_stack(&closure);

  assert(!is_empty() || closure._cb == nullptr, "");
  if (closure._cb != nullptr && closure._cb->is_nmethod()) {
    // The chunk's argsize must match the bottom compiled frame's stack args.
    assert(argsize() ==
      (closure._cb->as_nmethod()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >>LogBytesPerWord,
      "chunk argsize: %d bottom frame argsize: %d", argsize(),
      (closure._cb->as_nmethod()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >>LogBytesPerWord);
  }

  assert(closure._num_interpreted_frames == 0 || has_mixed_frames(), "");

  if (!concurrent) {
    assert(closure._size <= size + (stack_size() - bottom()),
           "size: %d bottom: %d closure.size: %d end sp: " PTR_FORMAT " start sp: %d chunk size: %d",
           size, bottom(), closure._size, closure._sp - start_address(), sp(), stack_size());
    if (closure._num_frames > 0) {
      assert(closure._argsize >= frame::metadata_words_at_top, "should be set up");
      assert(argsize() == closure._argsize - frame::metadata_words_at_top,
             "argsize(): %d closure.argsize: %d closure.callee_interpreted: %d",
             argsize(), closure._argsize, closure._callee_interpreted);
    }

    int calculated_max_size = closure._size
                              + closure._num_i2c * frame::align_wiggle
                              + closure._num_interpreted_frames * frame::align_wiggle;
    assert(max_thawing_size() == calculated_max_size,
           "max_size(): %d calculated_max_size: %d argsize: %d num_i2c: %d",
           max_thawing_size(), calculated_max_size, closure._argsize, closure._num_i2c);

    if (out_size   != nullptr) *out_size   += size;
    if (out_oops   != nullptr) *out_oops   += closure._num_oops;
    if (out_frames != nullptr) *out_frames += closure._num_frames;
    if (out_interpreted_frames != nullptr) *out_interpreted_frames += closure._num_interpreted_frames;
  } else {
    assert(out_size == nullptr, "");
    assert(out_oops == nullptr, "");
    assert(out_frames == nullptr, "");
    assert(out_interpreted_frames == nullptr, "");
  }

  if (has_bitmap()) {
    assert(bitmap().size() == InstanceStackChunkKlass::bitmap_size_in_bits(stack_size()),
           "bitmap().size(): %zu stack_size: %d",
           bitmap().size(), stack_size());

    // The number of set bits must match the number of oops found by the walk.
    int oop_count;
    if (UseCompressedOops) {
      StackChunkVerifyBitmapClosure<narrowOop> bitmap_closure(this);
      bitmap().iterate(&bitmap_closure,
                       bit_index_for((narrowOop*)(sp_address() - frame::metadata_words_at_bottom)),
                       bit_index_for((narrowOop*)end_address()));
      oop_count = bitmap_closure._count;
    } else {
      StackChunkVerifyBitmapClosure<oop> bitmap_closure(this);
      bitmap().iterate(&bitmap_closure,
                       bit_index_for((oop*)(sp_address() - frame::metadata_words_at_bottom)),
                       bit_index_for((oop*)end_address()));
      oop_count = bitmap_closure._count;
    }
    assert(oop_count == closure._num_oops,
           "bitmap_closure._count: %d closure._num_oops: %d", oop_count, closure._num_oops);
  }

  return true;
}
626 #endif
--- EOF ---