/*
 * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetStackChunk.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/memRegion.hpp"
#include "oops/instanceStackChunkKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "runtime/frame.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "runtime/stackChunkFrameStream.inline.hpp"

// Note: Some functions in this file work with stale object pointers, e.g.
//       DerivedPointersSupport. Be extra careful not to put those pointers into
//       variables of the 'oop' type. There's extra GC verification around oops
//       that may fail when stale oops are being used.

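// Visits the oops of a single frame: interpreted frames are walked with the
// interpreted-frame oop walker, compiled frames via their oop map.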
template <typename RegisterMapT>
class FrameOopIterator : public OopIterator {
private:
  const frame& _f;
  const RegisterMapT* _map;

public:
  FrameOopIterator(const frame& f, const RegisterMapT* map)
    : _f(f),
      _map(map) {
  }

  virtual void oops_do(OopClosure* cl) override {
    if (_f.is_interpreted_frame()) {
      _f.oops_interpreted_do(cl, nullptr);
    } else {
      OopMapDo<OopClosure, DerivedOopClosure, IncludeAllValues> visitor(cl, nullptr);
      visitor.oops_do(&_f, _map, _f.oop_map());
    }
  }
};

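// Visits the oops of the chunk's lock stack, which is stored at the chunk's
// start address.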
class LockStackOopIterator : public OopIterator {
private:
  const stackChunkOop _chunk;
public:
  LockStackOopIterator(const stackChunkOop chunk) : _chunk(chunk) {}

  virtual void oops_do(OopClosure* cl) override {
    int cnt = _chunk->lockstack_size();
    oop* lockstack_start = (oop*)_chunk->start_address();
    for (int i = 0; i < cnt; i++) {
      cl->do_oop(&lockstack_start[i]);
    }
  }
};

frame stackChunkOopDesc::top_frame(RegisterMap* map) {
  assert(!is_empty(), "");
  StackChunkFrameStream<ChunkFrames::Mixed> fs(this);

  map->set_stack_chunk(this);
  fs.initialize_register_map(map);

  frame f = fs.to_frame();

  assert(to_offset(f.sp()) == sp(), "f.offset_sp(): %d sp(): %d async: %d", f.offset_sp(), sp(), map->is_async());
  relativize_frame(f);
  f.set_frame_index(0);
  return f;
}

frame stackChunkOopDesc::sender(const frame& f, RegisterMap* map) {
  assert(map->in_cont(), "");
  assert(!map->include_argument_oops(), "");
  assert(!f.is_empty(), "");
  assert(map->stack_chunk() == this, "");
  assert(!is_empty(), "");

  int index = f.frame_index(); // we need to capture the index before calling derelativize, which destroys it
  StackChunkFrameStream<ChunkFrames::Mixed> fs(this, derelativize(f));
  fs.next(map);

  if (!fs.is_done()) {
    frame sender = fs.to_frame();
    assert(is_usable_in_chunk(sender.unextended_sp()), "");
    relativize_frame(sender);

    sender.set_frame_index(index + 1);
    return sender;
  }

  if (parent() != nullptr) {
    assert(!parent()->is_empty(), "");
    return parent()->top_frame(map);
  }

  return Continuation::continuation_parent_frame(map);
}

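// Counts the Java frames represented by a physical frame: an interpreted frame
// counts as one, while a compiled frame contributes one frame per inlined scope
// in its scope descriptor chain. Stub frames are skipped by the caller below.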
static int num_java_frames(nmethod* nm, address pc) {
  int count = 0;
  for (ScopeDesc* scope = nm->scope_desc_at(pc); scope != nullptr; scope = scope->sender()) {
    count++;
  }
  return count;
}

static int num_java_frames(const StackChunkFrameStream<ChunkFrames::Mixed>& f) {
  assert(f.is_interpreted()
         || (f.cb() != nullptr && f.cb()->is_nmethod() && f.cb()->as_nmethod()->is_java_method()), "");
  return f.is_interpreted() ? 1 : num_java_frames(f.cb()->as_nmethod(), f.orig_pc());
}

int stackChunkOopDesc::num_java_frames() const {
  int n = 0;
  for (StackChunkFrameStream<ChunkFrames::Mixed> f(const_cast<stackChunkOopDesc*>(this)); !f.is_done();
       f.next(SmallRegisterMap::instance())) {
    if (!f.is_stub()) {
      n += ::num_java_frames(f);
    }
  }
  return n;
}

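// Applies the requested barrier type (Load or Store) to every frame in the
// chunk; driven by stackChunkOopDesc::do_barriers() below.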
template <stackChunkOopDesc::BarrierType barrier>
class DoBarriersStackClosure {
  const stackChunkOop _chunk;

public:
  DoBarriersStackClosure(stackChunkOop chunk) : _chunk(chunk) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    _chunk->do_barriers0<barrier>(f, map);
    return true;
  }
};

template <stackChunkOopDesc::BarrierType barrier>
void stackChunkOopDesc::do_barriers() {
  DoBarriersStackClosure<barrier> closure(this);
  iterate_stack(&closure);
}

template void stackChunkOopDesc::do_barriers<stackChunkOopDesc::BarrierType::Load> ();
template void stackChunkOopDesc::do_barriers<stackChunkOopDesc::BarrierType::Store>();

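// Converts derived pointers to and from offsets relative to their base oop.
// While the base may be moved by the GC, a derived pointer stored as a
// base-relative offset stays valid and can be reconstructed on derelativization.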
class DerivedPointersSupport {
public:
  static void relativize(derived_base* base_loc, derived_pointer* derived_loc) {
    // The base oop could be stale from the GC's point-of-view. Treat it as a
    // uintptr_t to stay clear of the oop verification code in oopsHierarchy.hpp.
    uintptr_t base = *(uintptr_t*)base_loc;
    if (base == 0) {
      return;
    }
    assert(!UseCompressedOops || !CompressedOops::is_base((void*)base), "");

    // This is always a full derived pointer
    uintptr_t derived_int_val = *(uintptr_t*)derived_loc;

    // Make the pointer an offset (relativize) and store it at the same location
    uintptr_t offset = derived_int_val - base;
    *(uintptr_t*)derived_loc = offset;
  }

  static void derelativize(derived_base* base_loc, derived_pointer* derived_loc) {
    uintptr_t base = *(uintptr_t*)base_loc;
    if (base == 0) {
      return;
    }
    assert(!UseCompressedOops || !CompressedOops::is_base((void*)base), "");

    // All derived pointers should have been relativized into offsets
    uintptr_t offset = *(uintptr_t*)derived_loc;

    // Restore the original derived pointer
    *(uintptr_t*)derived_loc = base + offset;
  }

  struct RelativizeClosure : public DerivedOopClosure {
    virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override {
      DerivedPointersSupport::relativize(base_loc, derived_loc);
    }
  };

  struct DerelativizeClosure : public DerivedOopClosure {
    virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override {
      DerivedPointersSupport::derelativize(base_loc, derived_loc);
    }
  };
};

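// Frame closure for concurrent relativization: relativizes each frame's derived
// pointers and has the barrier set encode the frame's oops (and, via
// do_lockstack(), the lock stack's oops) for GC mode.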
template <typename DerivedPointerClosureType>
class EncodeGCModeConcurrentFrameClosure {
  stackChunkOop _chunk;
  DerivedPointerClosureType* _cl;

public:
  EncodeGCModeConcurrentFrameClosure(stackChunkOop chunk, DerivedPointerClosureType* cl)
    : _chunk(chunk),
      _cl(cl) {
  }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    f.iterate_derived_pointers(_cl, map);

    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    frame fr = f.to_frame();
    FrameOopIterator<RegisterMapT> iterator(fr, map);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }

  bool do_lockstack() {
    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    LockStackOopIterator iterator(_chunk);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }
};

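// The relativization handshake is driven by three monotonic flag bits:
// FLAG_CLAIM_RELATIVIZE marks that a thread has claimed the work,
// FLAG_NOTIFY_RELATIVIZE asks the claimer to notify waiters on
// ContinuationRelativize_lock, and FLAG_GC_MODE is the terminal state that
// guarantees relativization is complete.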
bool stackChunkOopDesc::try_acquire_relativization() {
  for (;;) {
    // We use an acquiring load when reading the flags to ensure that if we leave this
    // function thinking that relativization is finished, we know that if another thread
    // did the relativization, we will still be able to observe the relativized derived
    // pointers, which is important as subsequent modifications of derived pointers must
    // happen after relativization.
    uint8_t flags_before = flags_acquire();
    if ((flags_before & FLAG_GC_MODE) != 0) {
      // Terminal state - relativization is ensured
      return false;
    }

    if ((flags_before & FLAG_CLAIM_RELATIVIZE) != 0) {
      // Someone else has claimed relativization - wait for completion
      MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
      uint8_t flags_under_lock = flags_acquire();
      if ((flags_under_lock & FLAG_GC_MODE) != 0) {
        // Terminal state - relativization is ensured
        return false;
      }

      if ((flags_under_lock & FLAG_NOTIFY_RELATIVIZE) != 0) {
        // Relativization is claimed by another thread, and it knows it needs to notify
        ml.wait();
      } else if (try_set_flags(flags_under_lock, flags_under_lock | FLAG_NOTIFY_RELATIVIZE)) {
        // Relativization is claimed by another thread, and it knows it needs to notify
        ml.wait();
      }
      // Retry - rerun the loop
      continue;
    }

    if (try_set_flags(flags_before, flags_before | FLAG_CLAIM_RELATIVIZE)) {
      // Claimed relativization - let's do it
      return true;
    }
  }
}

void stackChunkOopDesc::release_relativization() {
  for (;;) {
    uint8_t flags_before = flags();
    if ((flags_before & FLAG_NOTIFY_RELATIVIZE) != 0) {
      MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
      // No need to CAS the terminal state; nobody else can be racingly mutating here
      // as both claim and notify flags are already set (and monotonic).
      // We do however need to use a releasing store on the flags, to ensure that
      // the reader of that value (using load_acquire) will be able to observe
      // the relativization of the derived pointers.
      uint8_t flags_under_lock = flags();
      release_set_flags(flags_under_lock | FLAG_GC_MODE);
      ml.notify_all();
      return;
    }

    if (try_set_flags(flags_before, flags_before | FLAG_GC_MODE)) {
      // Successfully set the terminal state; we are done
      return;
    }
  }
}

void stackChunkOopDesc::relativize_derived_pointers_concurrently() {
  if (!try_acquire_relativization()) {
    // Already relativized
    return;
  }

  DerivedPointersSupport::RelativizeClosure derived_cl;
  EncodeGCModeConcurrentFrameClosure<decltype(derived_cl)> frame_cl(this, &derived_cl);
  iterate_stack(&frame_cl);
  frame_cl.do_lockstack();

  release_relativization();
}

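// Frame closure for stackChunkOopDesc::transform(): eagerly relativizes each
// frame's derived pointers and encodes the frame's (and lock stack's) oops for
// GC mode when the chunk is switched to its bitmap-carrying representation.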
class TransformStackChunkClosure {
  stackChunkOop _chunk;

public:
  TransformStackChunkClosure(stackChunkOop chunk) : _chunk(chunk) { }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    DerivedPointersSupport::RelativizeClosure derived_cl;
    f.iterate_derived_pointers(&derived_cl, map);

    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    frame fr = f.to_frame();
    FrameOopIterator<RegisterMapT> iterator(fr, map);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }

  bool do_lockstack() {
    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    LockStackOopIterator iterator(_chunk);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }
};

void stackChunkOopDesc::transform() {
  assert(!is_gc_mode(), "Should only be called once per chunk");
  set_gc_mode(true);

  assert(!has_bitmap(), "Should only be set once");
  set_has_bitmap(true);
  bitmap().clear();

  TransformStackChunkClosure closure(this);
  iterate_stack(&closure);
  closure.do_lockstack();
}

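// Loads each oop through the heap access API, which applies the GC's load
// barrier; for the Store barrier type the value is also stored back so the
// store barrier is applied as well.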
template <stackChunkOopDesc::BarrierType barrier, bool compressedOopsWithBitmap>
class BarrierClosure: public OopClosure {
  NOT_PRODUCT(intptr_t* _sp;)

public:
  BarrierClosure(intptr_t* sp) NOT_PRODUCT(: _sp(sp)) {}

  virtual void do_oop(oop* p)       override { compressedOopsWithBitmap ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
  virtual void do_oop(narrowOop* p) override { do_oop_work(p); }

  template <class T> inline void do_oop_work(T* p) {
    oop value = (oop)HeapAccess<>::oop_load(p);
    if (barrier == stackChunkOopDesc::BarrierType::Store) {
      HeapAccess<>::oop_store(p, value);
    }
  }
};

template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
void stackChunkOopDesc::do_barriers0(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
  // We need to invoke the write barriers so as not to miss oops in old chunks that haven't yet been concurrently scanned
  assert(!f.is_done(), "");

  if (f.is_interpreted()) {
    Method* m = f.to_frame().interpreter_frame_method();
    // Class redefinition support
    m->record_gc_epoch();
  } else if (f.is_compiled()) {
    nmethod* nm = f.cb()->as_nmethod();
    // The entry barrier takes care of having the right synchronization
    // when keeping the nmethod alive during concurrent execution.
    nm->run_nmethod_entry_barrier();
    // There is no need to mark the Method, as class redefinition will walk the
    // CodeCache, noting their Methods
  }

  if (has_bitmap() && UseCompressedOops) {
    BarrierClosure<barrier, true> oops_closure(f.sp());
    f.iterate_oops(&oops_closure, map);
  } else {
    BarrierClosure<barrier, false> oops_closure(f.sp());
    f.iterate_oops(&oops_closure, map);
  }
}

template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMap* map);

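// Undoes the GC-mode encoding for a single frame that is being thawed back onto
// a thread stack: decodes the frame's oops and turns relativized derived
// pointers back into real pointers.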
template <typename RegisterMapT>
void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMapT* map) {
  if (!(is_gc_mode() || requires_barriers())) {
    return;
  }

  BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
  FrameOopIterator<RegisterMapT> iterator(f, map);
  bs_chunk->decode_gc_mode(this, &iterator);

  if (f.is_compiled_frame() && f.oop_map()->has_derived_oops()) {
    DerivedPointersSupport::DerelativizeClosure derived_closure;
    OopMapDo<OopClosure, DerivedPointersSupport::DerelativizeClosure, SkipNullValue> visitor(nullptr, &derived_closure);
    visitor.oops_do(&f, map, f.oop_map());
  }
}

template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMap* map);
template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMap* map);

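// Moves the oops of the chunk's lock stack into dst, clearing the source slots.
// Barriered (HeapAccess) loads are used when the chunk requires GC barriers,
// reading narrow oops when the chunk has a bitmap and compressed oops are in use.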
void stackChunkOopDesc::transfer_lockstack(oop* dst) {
  const bool requires_gc_barriers = is_gc_mode() || requires_barriers();
  const bool requires_uncompress = has_bitmap() && UseCompressedOops;
  const auto load_and_clear_obj = [&](intptr_t* at) -> oop {
    if (requires_gc_barriers) {
      if (requires_uncompress) {
        oop value = HeapAccess<>::oop_load(reinterpret_cast<narrowOop*>(at));
        HeapAccess<>::oop_store(reinterpret_cast<narrowOop*>(at), nullptr);
        return value;
      } else {
        oop value = HeapAccess<>::oop_load(reinterpret_cast<oop*>(at));
        HeapAccess<>::oop_store(reinterpret_cast<oop*>(at), nullptr);
        return value;
      }
    } else {
      oop value = *reinterpret_cast<oop*>(at);
      HeapAccess<>::oop_store(reinterpret_cast<oop*>(at), nullptr);
      return value;
    }
  };

  const int cnt = lockstack_size();
  intptr_t* lockstack_start = start_address();
  for (int i = 0; i < cnt; i++) {
    oop mon_owner = load_and_clear_obj(&lockstack_start[i]);
    assert(oopDesc::is_oop(mon_owner), "not an oop");
    dst[i] = mon_owner;
  }
}

void stackChunkOopDesc::print_on(bool verbose, outputStream* st) const {
  if (*((juint*)this) == badHeapWordVal) {
    st->print_cr("BAD WORD");
  } else {
    InstanceStackChunkKlass::print_chunk(const_cast<stackChunkOopDesc*>(this), verbose, st);
  }
}

#ifdef ASSERT

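// Verification-only closure: checks that every oop visited in a frame is either
// null or a valid oop, that its bit is set in the chunk's bitmap when the chunk
// has one, and counts the oops it visits.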
class StackChunkVerifyOopsClosure : public OopClosure {
  stackChunkOop _chunk;
  int _count;

public:
  StackChunkVerifyOopsClosure(stackChunkOop chunk)
    : _chunk(chunk), _count(0) {}

  void do_oop(oop* p) override { (_chunk->has_bitmap() && UseCompressedOops) ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
  void do_oop(narrowOop* p) override { do_oop_work(p); }

  template <typename T> inline void do_oop_work(T* p) {
    _count++;
    oop obj = _chunk->load_oop(p);
    assert(obj == nullptr || dbg_is_good_oop(obj), "p: " PTR_FORMAT " obj: " PTR_FORMAT, p2i(p), p2i(obj));
    if (_chunk->has_bitmap()) {
      BitMap::idx_t index = _chunk->bit_index_for(p);
      assert(_chunk->bitmap().at(index), "Bit not set at index " SIZE_FORMAT " corresponding to " PTR_FORMAT, index, p2i(p));
    }
  }

  int count() const { return _count; }
};

class VerifyStackChunkFrameClosure {
  stackChunkOop _chunk;

public:
  intptr_t* _sp;
  CodeBlob* _cb;
  bool _callee_interpreted;
  int _size;
  int _argsize;
  int _num_oops;
  int _num_frames;
  int _num_interpreted_frames;
  int _num_i2c;

  VerifyStackChunkFrameClosure(stackChunkOop chunk)
    : _chunk(chunk), _sp(nullptr), _cb(nullptr), _callee_interpreted(false),
      _size(0), _argsize(0), _num_oops(0), _num_frames(0), _num_interpreted_frames(0), _num_i2c(0) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    _sp = f.sp();
    _cb = f.cb();

    int fsize = f.frame_size() - ((f.is_interpreted() == _callee_interpreted) ? _argsize : 0);
    int num_oops = f.num_oops();
    assert(num_oops >= 0, "");

    _argsize   = f.stack_argsize() + frame::metadata_words_at_top;
    _size     += fsize;
    _num_oops += num_oops;
    if (f.is_interpreted()) {
      _num_interpreted_frames++;
    }

    log_develop_trace(continuations)("debug_verify_stack_chunk frame: %d sp: " INTPTR_FORMAT " pc: " PTR_FORMAT " interpreted: %d size: %d argsize: %d oops: %d", _num_frames, f.sp() - _chunk->start_address(), p2i(f.pc()), f.is_interpreted(), fsize, _argsize, num_oops);
    LogTarget(Trace, continuations) lt;
    if (lt.develop_is_enabled()) {
      LogStream ls(lt);
      f.print_on(&ls);
    }
    assert(f.pc() != nullptr,
           "young: %d num_frames: %d sp: " PTR_FORMAT " start: " PTR_FORMAT " end: " PTR_FORMAT,
           !_chunk->requires_barriers(), _num_frames, p2i(f.sp()), p2i(_chunk->start_address()), p2i(_chunk->bottom_address()));

    if (_num_frames == 0) {
      assert(f.pc() == _chunk->pc(), "");
    }

    if (_num_frames > 0 && !_callee_interpreted && f.is_interpreted()) {
      log_develop_trace(continuations)("debug_verify_stack_chunk i2c");
      _num_i2c++;
    }

    StackChunkVerifyOopsClosure oops_closure(_chunk);
    f.iterate_oops(&oops_closure, map);
    assert(oops_closure.count() == num_oops, "oops: %d oopmap->num_oops(): %d", oops_closure.count(), num_oops);

    _callee_interpreted = f.is_interpreted();
    _num_frames++;
    return true;
  }
};

template <typename T>
class StackChunkVerifyBitmapClosure : public BitMapClosure {
  stackChunkOop _chunk;

public:
  int _count;

  StackChunkVerifyBitmapClosure(stackChunkOop chunk) : _chunk(chunk), _count(0) {}

  bool do_bit(BitMap::idx_t index) override {
    T* p = _chunk->address_for_bit<T>(index);
    _count++;

    oop obj = _chunk->load_oop(p);
    assert(obj == nullptr || dbg_is_good_oop(obj),
           "p: " PTR_FORMAT " obj: " PTR_FORMAT " index: " SIZE_FORMAT,
           p2i(p), p2i((oopDesc*)obj), index);

    return true; // continue processing
  }
};

bool stackChunkOopDesc::verify(size_t* out_size, int* out_oops, int* out_frames, int* out_interpreted_frames) {
  DEBUG_ONLY(if (!VerifyContinuations) return true;)

  assert(oopDesc::is_oop(this), "");

  assert(stack_size() >= 0, "");
  assert(!has_bitmap() || is_gc_mode(), "");

  if (is_empty()) {
    assert(max_thawing_size() == 0, "");
  } else {
    assert(argsize() >= 0, "");
  }

  assert(oopDesc::is_oop_or_null(parent()), "");

  const bool concurrent = !Thread::current()->is_Java_thread();

  // If argsize == 0 and the chunk isn't mixed, the chunk contains the metadata (pc, fp -- frame::sender_sp_offset)
  // for the top frame (below sp), and *not* for the bottom frame.
  int size = bottom() - sp();
  assert(size >= 0, "");
  assert((size == 0) == is_empty(), "");

  const StackChunkFrameStream<ChunkFrames::Mixed> first(this);

  VerifyStackChunkFrameClosure closure(this);
  iterate_stack(&closure);

  assert(!is_empty() || closure._cb == nullptr, "");
  if (closure._cb != nullptr && closure._cb->is_nmethod()) {
    assert(argsize() ==
           (closure._cb->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord,
           "chunk argsize: %d bottom frame argsize: %d", argsize(),
           (closure._cb->as_nmethod()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord);
  }

  assert(closure._num_interpreted_frames == 0 || has_mixed_frames(), "");

  if (!concurrent) {
    assert(closure._size <= size + (stack_size() - bottom()),
           "size: %d bottom: %d closure.size: %d end sp: " PTR_FORMAT " start sp: %d chunk size: %d",
           size, bottom(), closure._size, closure._sp - start_address(), sp(), stack_size());
    if (closure._num_frames > 0) {
      assert(closure._argsize >= frame::metadata_words_at_top, "should be set up");
      assert(argsize() == closure._argsize - frame::metadata_words_at_top,
             "argsize(): %d closure.argsize: %d closure.callee_interpreted: %d",
             argsize(), closure._argsize, closure._callee_interpreted);
    }

    int calculated_max_size = closure._size
                              + closure._num_i2c * frame::align_wiggle
                              + closure._num_interpreted_frames * frame::align_wiggle;
    assert(max_thawing_size() == calculated_max_size,
           "max_size(): %d calculated_max_size: %d argsize: %d num_i2c: %d",
           max_thawing_size(), calculated_max_size, closure._argsize, closure._num_i2c);

    if (out_size   != nullptr) *out_size   += size;
    if (out_oops   != nullptr) *out_oops   += closure._num_oops;
    if (out_frames != nullptr) *out_frames += closure._num_frames;
    if (out_interpreted_frames != nullptr) *out_interpreted_frames += closure._num_interpreted_frames;
  } else {
    assert(out_size == nullptr, "");
    assert(out_oops == nullptr, "");
    assert(out_frames == nullptr, "");
    assert(out_interpreted_frames == nullptr, "");
  }

  if (has_bitmap()) {
    assert(bitmap().size() == InstanceStackChunkKlass::bitmap_size_in_bits(stack_size()),
           "bitmap().size(): %zu stack_size: %d",
           bitmap().size(), stack_size());

    int oop_count;
    if (UseCompressedOops) {
      StackChunkVerifyBitmapClosure<narrowOop> bitmap_closure(this);
      bitmap().iterate(&bitmap_closure,
                       bit_index_for((narrowOop*)(sp_address() - frame::metadata_words_at_bottom)),
                       bit_index_for((narrowOop*)end_address()));
      oop_count = bitmap_closure._count;
    } else {
      StackChunkVerifyBitmapClosure<oop> bitmap_closure(this);
      bitmap().iterate(&bitmap_closure,
                       bit_index_for((oop*)(sp_address() - frame::metadata_words_at_bottom)),
                       bit_index_for((oop*)end_address()));
      oop_count = bitmap_closure._count;
    }
    assert(oop_count == closure._num_oops,
           "bitmap_closure._count: %d closure._num_oops: %d", oop_count, closure._num_oops);
  }

  return true;
}
#endif