/*
 * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "code/nmethod.hpp"
#include "code/scopeDesc.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetStackChunk.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/memRegion.hpp"
#include "oops/instanceStackChunkKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "runtime/frame.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "runtime/stackChunkFrameStream.inline.hpp"

// Note: Some functions in this file work with stale object pointers, e.g.
//       DerivedPointersSupport. Be extra careful to not put those pointers into
//       variables of the 'oop' type. There's extra GC verification around oops
//       that may fail when stale oops are being used.
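
// FrameOopIterator and LockStackOopIterator (below) adapt a single frame, or the
// chunk's lock stack, to the OopIterator interface that BarrierSetStackChunk uses
// when encoding or decoding the chunk's oops for GC mode.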

template <typename RegisterMapT>
class FrameOopIterator : public OopIterator {
private:
  const frame& _f;
  const RegisterMapT* _map;

public:
  FrameOopIterator(const frame& f, const RegisterMapT* map)
    : _f(f),
      _map(map) {
  }

  virtual void oops_do(OopClosure* cl) override {
    if (_f.is_interpreted_frame()) {
      _f.oops_interpreted_do(cl, nullptr);
    } else {
      OopMapDo<OopClosure, DerivedOopClosure, IncludeAllValues> visitor(cl, nullptr);
      visitor.oops_do(&_f, _map, _f.oop_map());
    }
  }
};

class LockStackOopIterator : public OopIterator {
private:
  const stackChunkOop _chunk;
public:
  LockStackOopIterator(const stackChunkOop chunk) : _chunk(chunk) {}

  virtual void oops_do(OopClosure* cl) override {
    int cnt = _chunk->lockstack_size();
    oop* lockstack_start = (oop*)_chunk->start_address();
    for (int i = 0; i < cnt; i++) {
      cl->do_oop(&lockstack_start[i]);
    }
  }
};

frame stackChunkOopDesc::top_frame(RegisterMap* map) {
  assert(!is_empty(), "");
  StackChunkFrameStream<ChunkFrames::Mixed> fs(this);

  map->set_stack_chunk(this);
  fs.initialize_register_map(map);

  frame f = fs.to_frame();

  assert(to_offset(f.sp()) == sp(), "f.offset_sp(): %d sp(): %d async: %d", f.offset_sp(), sp(), map->is_async());
  relativize_frame(f);
  f.set_frame_index(0);
  return f;
}

frame stackChunkOopDesc::sender(const frame& f, RegisterMap* map) {
  assert(map->in_cont(), "");
  assert(!map->include_argument_oops(), "");
  assert(!f.is_empty(), "");
  assert(map->stack_chunk() == this, "");
  assert(!is_empty(), "");

  int index = f.frame_index(); // we need to capture the index before calling derelativize, which destroys it
  StackChunkFrameStream<ChunkFrames::Mixed> fs(this, derelativize(f));
  fs.next(map);

  if (!fs.is_done()) {
    frame sender = fs.to_frame();
    assert(is_usable_in_chunk(sender.unextended_sp()), "");
    relativize_frame(sender);

    sender.set_frame_index(index + 1);
    return sender;
  }

  if (parent() != nullptr) {
    assert(!parent()->is_empty(), "");
    return parent()->top_frame(map);
  }

  return Continuation::continuation_parent_frame(map);
}

static int num_java_frames(nmethod* nm, address pc) {
  int count = 0;
  for (ScopeDesc* scope = nm->scope_desc_at(pc); scope != nullptr; scope = scope->sender()) {
    count++;
  }
  return count;
}

static int num_java_frames(const StackChunkFrameStream<ChunkFrames::Mixed>& f) {
  assert(f.is_interpreted()
         || (f.cb() != nullptr && f.cb()->is_nmethod() && f.cb()->as_nmethod()->is_java_method()), "");
  return f.is_interpreted() ? 1 : num_java_frames(f.cb()->as_nmethod(), f.orig_pc());
}

int stackChunkOopDesc::num_java_frames() const {
  int n = 0;
  for (StackChunkFrameStream<ChunkFrames::Mixed> f(const_cast<stackChunkOopDesc*>(this)); !f.is_done();
       f.next(SmallRegisterMap::instance())) {
    if (!f.is_stub()) {
      n += ::num_java_frames(f);
    }
  }
  return n;
}

template <stackChunkOopDesc::BarrierType barrier>
class DoBarriersStackClosure {
  const stackChunkOop _chunk;

public:
  DoBarriersStackClosure(stackChunkOop chunk) : _chunk(chunk) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    _chunk->do_barriers0<barrier>(f, map);
    return true;
  }
};

template <stackChunkOopDesc::BarrierType barrier>
void stackChunkOopDesc::do_barriers() {
  DoBarriersStackClosure<barrier> closure(this);
  iterate_stack(&closure);
}

template void stackChunkOopDesc::do_barriers<stackChunkOopDesc::BarrierType::Load> ();
template void stackChunkOopDesc::do_barriers<stackChunkOopDesc::BarrierType::Store>();

class DerivedPointersSupport {
public:
  static void relativize(derived_base* base_loc, derived_pointer* derived_loc) {
    // The base oop could be stale from the GC's point-of-view. Treat it as an
    // uintptr_t to stay clear of the oop verification code in oopsHierarchy.hpp.
    uintptr_t base = *(uintptr_t*)base_loc;
    if (base == 0) {
      return;
    }
    assert(!UseCompressedOops || !CompressedOops::is_base((void*)base), "");

    // This is always a full derived pointer
    uintptr_t derived_int_val = *(uintptr_t*)derived_loc;

    // Make the pointer an offset (relativize) and store it at the same location
    uintptr_t offset = derived_int_val - base;
    *(uintptr_t*)derived_loc = offset;
  }

  static void derelativize(derived_base* base_loc, derived_pointer* derived_loc) {
    uintptr_t base = *(uintptr_t*)base_loc;
    if (base == 0) {
      return;
    }
    assert(!UseCompressedOops || !CompressedOops::is_base((void*)base), "");

    // All derived pointers should have been relativized into offsets
    uintptr_t offset = *(uintptr_t*)derived_loc;

    // Restore the original derived pointer
    *(uintptr_t*)derived_loc = base + offset;
  }

  struct RelativizeClosure : public DerivedOopClosure {
    virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override {
      DerivedPointersSupport::relativize(base_loc, derived_loc);
    }
  };

  struct DerelativizeClosure : public DerivedOopClosure {
    virtual void do_derived_oop(derived_base* base_loc, derived_pointer* derived_loc) override {
      DerivedPointersSupport::derelativize(base_loc, derived_loc);
    }
  };
};

template <typename DerivedPointerClosureType>
class EncodeGCModeConcurrentFrameClosure {
  stackChunkOop _chunk;
  DerivedPointerClosureType* _cl;

public:
  EncodeGCModeConcurrentFrameClosure(stackChunkOop chunk, DerivedPointerClosureType* cl)
    : _chunk(chunk),
      _cl(cl) {
  }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    f.iterate_derived_pointers(_cl, map);

    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    frame fr = f.to_frame();
    FrameOopIterator<RegisterMapT> iterator(fr, map);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }

  bool do_lockstack() {
    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    LockStackOopIterator iterator(_chunk);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }
};

bool stackChunkOopDesc::try_acquire_relativization() {
  for (;;) {
    // We use an acquiring load when reading the flags to ensure that if we leave this
    // function thinking that relativization is finished, we know that if another thread
    // did the relativization, we will still be able to observe the relativized derived
    // pointers, which is important as subsequent modifications of derived pointers must
    // happen after relativization.
    uint8_t flags_before = flags_acquire();
    if ((flags_before & FLAG_GC_MODE) != 0) {
      // Terminal state - relativization is ensured
      return false;
    }

    if ((flags_before & FLAG_CLAIM_RELATIVIZE) != 0) {
      // Someone else has claimed relativization - wait for completion
      MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
      uint8_t flags_under_lock = flags_acquire();
      if ((flags_under_lock & FLAG_GC_MODE) != 0) {
        // Terminal state - relativization is ensured
        return false;
      }

      if ((flags_under_lock & FLAG_NOTIFY_RELATIVIZE) != 0) {
        // Relativization is claimed by another thread, and it knows it needs to notify
        ml.wait();
      } else if (try_set_flags(flags_under_lock, flags_under_lock | FLAG_NOTIFY_RELATIVIZE)) {
        // Relativization is claimed by another thread, and it knows it needs to notify
        ml.wait();
      }
      // Retry - rerun the loop
      continue;
    }

    if (try_set_flags(flags_before, flags_before | FLAG_CLAIM_RELATIVIZE)) {
      // Claimed relativization - let's do it
      return true;
    }
  }
}

void stackChunkOopDesc::release_relativization() {
  for (;;) {
    uint8_t flags_before = flags();
    if ((flags_before & FLAG_NOTIFY_RELATIVIZE) != 0) {
      MonitorLocker ml(ContinuationRelativize_lock, Mutex::_no_safepoint_check_flag);
      // No need to CAS the terminal state; nobody else can be racingly mutating here
      // as both claim and notify flags are already set (and monotonic)
      // We do however need to use a releasing store on the flags, to ensure that
      // the reader of that value (using load_acquire) will be able to observe
      // the relativization of the derived pointers
      uint8_t flags_under_lock = flags();
      release_set_flags(flags_under_lock | FLAG_GC_MODE);
      ml.notify_all();
      return;
    }

    if (try_set_flags(flags_before, flags_before | FLAG_GC_MODE)) {
      // Successfully set the terminal state; we are done
      return;
    }
  }
}

void stackChunkOopDesc::relativize_derived_pointers_concurrently() {
  if (!try_acquire_relativization()) {
    // Already relativized
    return;
  }

  DerivedPointersSupport::RelativizeClosure derived_cl;
  EncodeGCModeConcurrentFrameClosure<decltype(derived_cl)> frame_cl(this, &derived_cl);
  iterate_stack(&frame_cl);
  frame_cl.do_lockstack();

  release_relativization();
}

class TransformStackChunkClosure {
  stackChunkOop _chunk;

public:
  TransformStackChunkClosure(stackChunkOop chunk) : _chunk(chunk) { }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
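    // Relativize this frame's derived pointers first, then let the barrier set
    // encode the frame's oops for GC mode.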
    DerivedPointersSupport::RelativizeClosure derived_cl;
    f.iterate_derived_pointers(&derived_cl, map);

    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    frame fr = f.to_frame();
    FrameOopIterator<RegisterMapT> iterator(fr, map);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }

  bool do_lockstack() {
    BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
    LockStackOopIterator iterator(_chunk);
    bs_chunk->encode_gc_mode(_chunk, &iterator);

    return true;
  }
};

void stackChunkOopDesc::transform() {
  assert(!is_gc_mode(), "Should only be called once per chunk");
  set_gc_mode(true);

  assert(!has_bitmap(), "Should only be set once");
  set_has_bitmap(true);
  bitmap().clear();

  TransformStackChunkClosure closure(this);
  iterate_stack(&closure);
  closure.do_lockstack();
}

template <stackChunkOopDesc::BarrierType barrier, bool compressedOopsWithBitmap>
class BarrierClosure: public OopClosure {
  NOT_PRODUCT(intptr_t* _sp;)

public:
  BarrierClosure(intptr_t* sp) NOT_PRODUCT(: _sp(sp)) {}

  virtual void do_oop(oop* p) override { compressedOopsWithBitmap ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
  virtual void do_oop(narrowOop* p) override { do_oop_work(p); }

  template <class T> inline void do_oop_work(T* p) {
    oop value = (oop)HeapAccess<>::oop_load(p);
    if (barrier == stackChunkOopDesc::BarrierType::Store) {
      HeapAccess<>::oop_store(p, value);
    }
  }
};

template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
void stackChunkOopDesc::do_barriers0(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
  // We need to invoke the write barriers so as not to miss oops in old chunks that haven't yet been concurrently scanned
  assert(!f.is_done(), "");

  if (f.is_interpreted()) {
    Method* m = f.to_frame().interpreter_frame_method();
    // Class redefinition support
    m->record_gc_epoch();
  } else if (f.is_compiled()) {
    nmethod* nm = f.cb()->as_nmethod();
    // The entry barrier takes care of having the right synchronization
    // when keeping the nmethod alive during concurrent execution.
    nm->run_nmethod_entry_barrier();
    // There is no need to mark the Method, as class redefinition will walk the
    // CodeCache, noting their Methods
  }

  if (has_bitmap() && UseCompressedOops) {
    BarrierClosure<barrier, true> oops_closure(f.sp());
    f.iterate_oops(&oops_closure, map);
  } else {
    BarrierClosure<barrier, false> oops_closure(f.sp());
    f.iterate_oops(&oops_closure, map);
  }
}

template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMap* map);
template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMap* map);

template <typename RegisterMapT>
void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMapT* map) {
  if (!(is_gc_mode() || requires_barriers())) {
    return;
  }

  BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
  FrameOopIterator<RegisterMapT> iterator(f, map);
  bs_chunk->decode_gc_mode(this, &iterator);

  if (f.is_compiled_frame() && f.oop_map()->has_derived_oops()) {
    DerivedPointersSupport::DerelativizeClosure derived_closure;
    OopMapDo<OopClosure, DerivedPointersSupport::DerelativizeClosure, SkipNullValue> visitor(nullptr, &derived_closure);
    visitor.oops_do(&f, map, f.oop_map());
  }
}

template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMap* map);
template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMap* map);

void stackChunkOopDesc::transfer_lockstack(oop* dst, bool requires_barriers) {
  const bool requires_gc_barriers = is_gc_mode() || requires_barriers;
  const bool requires_uncompress = has_bitmap() && UseCompressedOops;
  const auto load_and_clear_obj = [&](intptr_t* at) -> oop {
    if (requires_gc_barriers) {
      if (requires_uncompress) {
        oop value = HeapAccess<>::oop_load(reinterpret_cast<narrowOop*>(at));
        HeapAccess<>::oop_store(reinterpret_cast<narrowOop*>(at), nullptr);
        return value;
      } else {
        oop value = HeapAccess<>::oop_load(reinterpret_cast<oop*>(at));
        HeapAccess<>::oop_store(reinterpret_cast<oop*>(at), nullptr);
        return value;
      }
    } else {
      oop value = *reinterpret_cast<oop*>(at);
      return value;
    }
  };

  const int cnt = lockstack_size();
  intptr_t* lockstack_start = start_address();
  for (int i = 0; i < cnt; i++) {
    oop mon_owner = load_and_clear_obj(&lockstack_start[i]);
    assert(oopDesc::is_oop(mon_owner), "not an oop");
    dst[i] = mon_owner;
  }
}

void stackChunkOopDesc::print_on(bool verbose, outputStream* st) const {
  if (*((juint*)this) == badHeapWordVal) {
    st->print_cr("BAD WORD");
  } else {
    InstanceStackChunkKlass::print_chunk(const_cast<stackChunkOopDesc*>(this), verbose, st);
  }
}

#ifdef ASSERT

class StackChunkVerifyOopsClosure : public OopClosure {
  stackChunkOop _chunk;
  int _count;

public:
  StackChunkVerifyOopsClosure(stackChunkOop chunk)
    : _chunk(chunk), _count(0) {}

  void do_oop(oop* p) override { (_chunk->has_bitmap() && UseCompressedOops) ? do_oop_work((narrowOop*)p) : do_oop_work(p); }
  void do_oop(narrowOop* p) override { do_oop_work(p); }

  template <typename T> inline void do_oop_work(T* p) {
    _count++;
    oop obj = _chunk->load_oop(p);
    assert(obj == nullptr || dbg_is_good_oop(obj), "p: " PTR_FORMAT " obj: " PTR_FORMAT, p2i(p), p2i(obj));
    if (_chunk->has_bitmap()) {
      BitMap::idx_t index = _chunk->bit_index_for(p);
      assert(_chunk->bitmap().at(index), "Bit not set at index %zu corresponding to " PTR_FORMAT, index, p2i(p));
    }
  }

  int count() const { return _count; }
};

class VerifyStackChunkFrameClosure {
  stackChunkOop _chunk;

public:
  intptr_t* _sp;
  CodeBlob* _cb;
  bool _callee_interpreted;
  int _size;
  int _argsize;
  int _num_oops;
  int _num_frames;
  int _num_interpreted_frames;
  int _num_i2c;

  VerifyStackChunkFrameClosure(stackChunkOop chunk)
    : _chunk(chunk), _sp(nullptr), _cb(nullptr), _callee_interpreted(false),
      _size(0), _argsize(0), _num_oops(0), _num_frames(0), _num_interpreted_frames(0), _num_i2c(0) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    _sp = f.sp();
    _cb = f.cb();

    int fsize = f.frame_size() - ((f.is_interpreted() == _callee_interpreted) ? _argsize : 0);
    int num_oops = f.num_oops();
    assert(num_oops >= 0, "");

    _argsize = f.stack_argsize() + frame::metadata_words_at_top;
    _size += fsize;
    _num_oops += num_oops;
    if (f.is_interpreted()) {
      _num_interpreted_frames++;
    }

    log_develop_trace(continuations)("debug_verify_stack_chunk frame: %d sp: " INTPTR_FORMAT " pc: " PTR_FORMAT " interpreted: %d size: %d argsize: %d oops: %d", _num_frames, f.sp() - _chunk->start_address(), p2i(f.pc()), f.is_interpreted(), fsize, _argsize, num_oops);
    LogTarget(Trace, continuations) lt;
    if (lt.develop_is_enabled()) {
      LogStream ls(lt);
      f.print_on(&ls);
    }
    assert(f.pc() != nullptr,
           "young: %d num_frames: %d sp: " PTR_FORMAT " start: " PTR_FORMAT " end: " PTR_FORMAT,
           !_chunk->requires_barriers(), _num_frames, p2i(f.sp()), p2i(_chunk->start_address()), p2i(_chunk->bottom_address()));

    if (_num_frames == 0) {
      assert(f.pc() == _chunk->pc(), "");
    }

    if (_num_frames > 0 && !_callee_interpreted && f.is_interpreted()) {
      log_develop_trace(continuations)("debug_verify_stack_chunk i2c");
      _num_i2c++;
    }

    StackChunkVerifyOopsClosure oops_closure(_chunk);
    f.iterate_oops(&oops_closure, map);
    assert(oops_closure.count() == num_oops, "oops: %d oopmap->num_oops(): %d", oops_closure.count(), num_oops);

    _callee_interpreted = f.is_interpreted();
    _num_frames++;
    return true;
  }
};

template <typename T>
class StackChunkVerifyBitmapClosure : public BitMapClosure {
  stackChunkOop _chunk;

public:
  int _count;

  StackChunkVerifyBitmapClosure(stackChunkOop chunk) : _chunk(chunk), _count(0) {}

  bool do_bit(BitMap::idx_t index) override {
    T* p = _chunk->address_for_bit<T>(index);
    _count++;

    oop obj = _chunk->load_oop(p);
    assert(obj == nullptr || dbg_is_good_oop(obj),
           "p: " PTR_FORMAT " obj: " PTR_FORMAT " index: %zu",
           p2i(p), p2i((oopDesc*)obj), index);

    return true; // continue processing
  }
};

bool stackChunkOopDesc::verify(size_t* out_size, int* out_oops, int* out_frames, int* out_interpreted_frames) {
  DEBUG_ONLY(if (!VerifyContinuations) return true;)

  assert(oopDesc::is_oop(this), "");

  assert(stack_size() >= 0, "");
  assert(!has_bitmap() || is_gc_mode(), "");

  if (is_empty()) {
    assert(max_thawing_size() == 0, "");
  } else {
    assert(argsize() >= 0, "");
  }

  assert(oopDesc::is_oop_or_null(parent()), "");

  const bool concurrent = !Thread::current()->is_Java_thread();

  // If argsize == 0 and the chunk isn't mixed, the chunk contains the metadata (pc, fp -- frame::sender_sp_offset)
  // for the top frame (below sp), and *not* for the bottom frame.
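  // The used portion of the chunk is the window of words between sp() and bottom().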
  int size = bottom() - sp();
  assert(size >= 0, "");
  assert((size == 0) == is_empty(), "");

  const StackChunkFrameStream<ChunkFrames::Mixed> first(this);

  VerifyStackChunkFrameClosure closure(this);
  iterate_stack(&closure);

  assert(!is_empty() || closure._cb == nullptr, "");
  if (closure._cb != nullptr && closure._cb->is_nmethod()) {
    assert(argsize() ==
           (closure._cb->as_nmethod()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >> LogBytesPerWord,
           "chunk argsize: %d bottom frame argsize: %d", argsize(),
           (closure._cb->as_nmethod()->num_stack_arg_slots()*VMRegImpl::stack_slot_size) >> LogBytesPerWord);
  }

  assert(closure._num_interpreted_frames == 0 || has_mixed_frames(), "");

  if (!concurrent) {
    assert(closure._size <= size + (stack_size() - bottom()),
           "size: %d bottom: %d closure.size: %d end sp: " PTR_FORMAT " start sp: %d chunk size: %d",
           size, bottom(), closure._size, closure._sp - start_address(), sp(), stack_size());
    if (closure._num_frames > 0) {
      assert(closure._argsize >= frame::metadata_words_at_top, "should be set up");
      assert(argsize() == closure._argsize - frame::metadata_words_at_top,
             "argsize(): %d closure.argsize: %d closure.callee_interpreted: %d",
             argsize(), closure._argsize, closure._callee_interpreted);
    }

    int calculated_max_size = closure._size
                              + closure._num_i2c * frame::align_wiggle
                              + closure._num_interpreted_frames * frame::align_wiggle;
    assert(max_thawing_size() == calculated_max_size,
           "max_size(): %d calculated_max_size: %d argsize: %d num_i2c: %d",
           max_thawing_size(), calculated_max_size, closure._argsize, closure._num_i2c);

    if (out_size != nullptr) *out_size += size;
    if (out_oops != nullptr) *out_oops += closure._num_oops;
    if (out_frames != nullptr) *out_frames += closure._num_frames;
    if (out_interpreted_frames != nullptr) *out_interpreted_frames += closure._num_interpreted_frames;
  } else {
    assert(out_size == nullptr, "");
    assert(out_oops == nullptr, "");
    assert(out_frames == nullptr, "");
    assert(out_interpreted_frames == nullptr, "");
  }

  if (has_bitmap()) {
    assert(bitmap().size() == InstanceStackChunkKlass::bitmap_size_in_bits(stack_size()),
           "bitmap().size(): %zu stack_size: %d",
           bitmap().size(), stack_size());

    int oop_count;
    if (UseCompressedOops) {
      StackChunkVerifyBitmapClosure<narrowOop> bitmap_closure(this);
      bitmap().iterate(&bitmap_closure,
                       bit_index_for((narrowOop*)(sp_address() - frame::metadata_words_at_bottom)),
                       bit_index_for((narrowOop*)end_address()));
      oop_count = bitmap_closure._count;
    } else {
      StackChunkVerifyBitmapClosure<oop> bitmap_closure(this);
      bitmap().iterate(&bitmap_closure,
                       bit_index_for((oop*)(sp_address() - frame::metadata_words_at_bottom)),
                       bit_index_for((oop*)end_address()));
      oop_count = bitmap_closure._count;
    }
    assert(oop_count == closure._num_oops,
           "bitmap_closure._count: %d closure._num_oops: %d", oop_count, closure._num_oops);
  }

  return true;
}
#endif