39
40 // Note: Some functions in this file work with stale object pointers, e.g.
41 // DerivedPointersSupport. Be extra careful not to put those pointers into
42 // variables of the 'oop' type. There's extra GC verification around oops
43 // that may fail when stale oops are used.
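// For example (illustrative only), keep such a pointer untyped:
//   intptr_t* p = raw_slot;   // hypothetical name; stale value, no oop checks
// rather than assigning it to an 'oop' variable, which the verification above may trip over.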
44
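// Adapts a frame and its register map to the OopIterator interface, so that
// BarrierSetStackChunk (see fix_thawed_frame below) can visit the frame's oops
// whether the frame is interpreted or compiled.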
45 template <typename RegisterMapT>
46 class FrameOopIterator : public OopIterator {
47 private:
48 const frame& _f;
49 const RegisterMapT* _map;
50
51 public:
52 FrameOopIterator(const frame& f, const RegisterMapT* map)
53 : _f(f),
54 _map(map) {
55 }
56
57 virtual void oops_do(OopClosure* cl) override {
58 if (_f.is_interpreted_frame()) {
59 _f.oops_interpreted_do(cl, nullptr);
60 } else {
61 OopMapDo<OopClosure, DerivedOopClosure, IncludeAllValues> visitor(cl, nullptr);
62 visitor.oops_do(&_f, _map, _f.oop_map());
63 }
64 }
65 };
66
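// Visits the oops of the lock stack stored in the chunk: the first
// lockstack_size() slots at the chunk's start address.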
67 class LockStackOopIterator : public OopIterator {
68 private:
69 const stackChunkOop _chunk;
70 public:
71 LockStackOopIterator(const stackChunkOop chunk) : _chunk(chunk) {}
72
73 virtual void oops_do(OopClosure* cl) override {
74 int cnt = _chunk->lockstack_size();
75 oop* lockstack_start = (oop*)_chunk->start_address();
76 for (int i = 0; i < cnt; i++) {
77 cl->do_oop(&lockstack_start[i]);
78 }
79 }
122 return Continuation::continuation_parent_frame(map);
123 }
124
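// Because of inlining, one physical compiled frame may represent several Java
// frames; count them by walking the scope descriptors starting at pc.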
125 static int num_java_frames(nmethod* nm, address pc) {
126 int count = 0;
127 for (ScopeDesc* scope = nm->scope_desc_at(pc); scope != nullptr; scope = scope->sender()) {
128 count++;
129 }
130 return count;
131 }
132
133 static int num_java_frames(const StackChunkFrameStream<ChunkFrames::Mixed>& f) {
134 assert(f.is_interpreted()
135 || (f.cb() != nullptr && f.cb()->is_nmethod() && f.cb()->as_nmethod()->is_java_method()), "");
136 return f.is_interpreted() ? 1 : num_java_frames(f.cb()->as_nmethod(), f.orig_pc());
137 }
138
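// Counts all Java frames in this chunk, including inlined frames and skipping
// stub frames.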
139 int stackChunkOopDesc::num_java_frames() const {
140 int n = 0;
141 for (StackChunkFrameStream<ChunkFrames::Mixed> f(const_cast<stackChunkOopDesc*>(this)); !f.is_done();
142 f.next(SmallRegisterMap::instance())) {
143 if (!f.is_stub()) {
144 n += ::num_java_frames(f);
145 }
146 }
147 return n;
148 }
149
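// Stack closure that forwards every visited frame to do_barriers0 with the
// requested barrier type.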
150 template <stackChunkOopDesc::BarrierType barrier>
151 class DoBarriersStackClosure {
152 const stackChunkOop _chunk;
153
154 public:
155 DoBarriersStackClosure(stackChunkOop chunk) : _chunk(chunk) {}
156
157 template <ChunkFrames frame_kind, typename RegisterMapT>
158 bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
159 _chunk->do_barriers0<barrier>(f, map);
160 return true;
161 }
162 };
398     // The entry barrier provides the synchronization needed to keep the
399     // nmethod alive while it may be executed concurrently.
400 nm->run_nmethod_entry_barrier();
401     // There is no need to mark the Method: class redefinition walks the
402     // CodeCache and notes the Methods of the nmethods it finds.
403 }
404
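  // Pick the closure specialization matching how oops are stored in the chunk:
  // as narrowOops when the chunk has a bitmap and compressed oops are in use,
  // as full-width oops otherwise.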
405 if (has_bitmap() && UseCompressedOops) {
406 BarrierClosure<barrier, true> oops_closure(f.sp());
407 f.iterate_oops(&oops_closure, map);
408 } else {
409 BarrierClosure<barrier, false> oops_closure(f.sp());
410 f.iterate_oops(&oops_closure, map);
411 }
412 }
413
414 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
415 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
416 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
417 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
418 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMap* map);
419 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMap* map);
420 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMap* map);
421 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMap* map);
422
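// Fixes up a frame that has just been thawed onto the stack: decodes oops that
// are still in GC mode and, for compiled frames, converts relativized derived
// oops back into real pointers.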
423 template <typename RegisterMapT>
424 void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMapT* map) {
425 if (!(is_gc_mode() || requires_barriers())) {
426 return;
427 }
428
429 BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
430 FrameOopIterator<RegisterMapT> iterator(f, map);
431 bs_chunk->decode_gc_mode(this, &iterator);
432
433 if (f.is_compiled_frame() && f.oop_map()->has_derived_oops()) {
434 DerivedPointersSupport::DerelativizeClosure derived_closure;
435 OopMapDo<OopClosure, DerivedPointersSupport::DerelativizeClosure, SkipNullValue> visitor(nullptr, &derived_closure);
436 visitor.oops_do(&f, map, f.oop_map());
437 }
438 }
439
440 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMap* map);
441 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMap* map);
442
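// Transfers the oops of the frozen lock stack out of the chunk into dst; the
// source slots are cleared through the heap access API when GC barriers are
// required.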
443 void stackChunkOopDesc::transfer_lockstack(oop* dst, bool requires_barriers) {
444 const bool requires_gc_barriers = is_gc_mode() || requires_barriers;
445 const bool requires_uncompress = has_bitmap() && UseCompressedOops;
446 const auto load_and_clear_obj = [&](intptr_t* at) -> oop {
447 if (requires_gc_barriers) {
448 if (requires_uncompress) {
449 oop value = HeapAccess<>::oop_load(reinterpret_cast<narrowOop*>(at));
450 HeapAccess<>::oop_store(reinterpret_cast<narrowOop*>(at), nullptr);
451 return value;
452 } else {
453 oop value = HeapAccess<>::oop_load(reinterpret_cast<oop*>(at));
454 HeapAccess<>::oop_store(reinterpret_cast<oop*>(at), nullptr);
455 return value;
456 }
457 } else {
458 oop value = *reinterpret_cast<oop*>(at);
459 return value;
460 }
461 };
510 intptr_t* _sp;
511 CodeBlob* _cb;
512 bool _callee_interpreted;
513 int _size;
514 int _argsize;
515 int _num_oops;
516 int _num_frames;
517 int _num_interpreted_frames;
518 int _num_i2c;
519
520 VerifyStackChunkFrameClosure(stackChunkOop chunk)
521 : _chunk(chunk), _sp(nullptr), _cb(nullptr), _callee_interpreted(false),
522 _size(0), _argsize(0), _num_oops(0), _num_frames(0), _num_interpreted_frames(0), _num_i2c(0) {}
523
524 template <ChunkFrames frame_kind, typename RegisterMapT>
525 bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
526 _sp = f.sp();
527 _cb = f.cb();
528
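    // When this frame and its callee are of the same kind (both interpreted or
    // both compiled), the callee's stack-argument area overlaps this frame, so
    // it is subtracted to avoid double-counting.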
529 int fsize = f.frame_size() - ((f.is_interpreted() == _callee_interpreted) ? _argsize : 0);
530 int num_oops = f.num_oops();
531 assert(num_oops >= 0, "");
532
533 _argsize = f.stack_argsize() + frame::metadata_words_at_top;
534 _size += fsize;
535 _num_oops += num_oops;
536 if (f.is_interpreted()) {
537 _num_interpreted_frames++;
538 }
539
540 log_develop_trace(continuations)("debug_verify_stack_chunk frame: %d sp: " INTPTR_FORMAT " pc: " PTR_FORMAT " interpreted: %d size: %d argsize: %d oops: %d", _num_frames, f.sp() - _chunk->start_address(), p2i(f.pc()), f.is_interpreted(), fsize, _argsize, num_oops);
541 LogTarget(Trace, continuations) lt;
542 if (lt.develop_is_enabled()) {
543 LogStream ls(lt);
544 f.print_on(&ls);
545 }
546 assert(f.pc() != nullptr,
547 "young: %d num_frames: %d sp: " PTR_FORMAT " start: " PTR_FORMAT " end: " PTR_FORMAT,
548 !_chunk->requires_barriers(), _num_frames, p2i(f.sp()), p2i(_chunk->start_address()), p2i(_chunk->bottom_address()));
549
550 if (_num_frames == 0) {
|
39
40 // Note: Some functions in this file work with stale object pointers, e.g.
41 // DerivedPointersSupport. Be extra careful not to put those pointers into
42 // variables of the 'oop' type. There's extra GC verification around oops
43 // that may fail when stale oops are used.
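// For example (illustrative only), keep such a pointer untyped:
//   intptr_t* p = raw_slot;   // hypothetical name; stale value, no oop checks
// rather than assigning it to an 'oop' variable, which the verification above may trip over.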
44
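// Adapts a frame and its register map to the OopIterator interface, so that
// BarrierSetStackChunk (see fix_thawed_frame below) can visit the frame's oops
// whether the frame is interpreted or compiled.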
45 template <typename RegisterMapT>
46 class FrameOopIterator : public OopIterator {
47 private:
48 const frame& _f;
49 const RegisterMapT* _map;
50
51 public:
52 FrameOopIterator(const frame& f, const RegisterMapT* map)
53 : _f(f),
54 _map(map) {
55 }
56
57 virtual void oops_do(OopClosure* cl) override {
58 if (_f.is_interpreted_frame()) {
59 _f.oops_interpreted_do(cl, _map);
60 } else {
61 OopMapDo<OopClosure, DerivedOopClosure, IncludeAllValues> visitor(cl, nullptr);
62 visitor.oops_do(&_f, _map, _f.oop_map());
63 }
64 }
65 };
66
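// Visits the oops of the lock stack stored in the chunk: the first
// lockstack_size() slots at the chunk's start address.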
67 class LockStackOopIterator : public OopIterator {
68 private:
69 const stackChunkOop _chunk;
70 public:
71 LockStackOopIterator(const stackChunkOop chunk) : _chunk(chunk) {}
72
73 virtual void oops_do(OopClosure* cl) override {
74 int cnt = _chunk->lockstack_size();
75 oop* lockstack_start = (oop*)_chunk->start_address();
76 for (int i = 0; i < cnt; i++) {
77 cl->do_oop(&lockstack_start[i]);
78 }
79 }
122 return Continuation::continuation_parent_frame(map);
123 }
124
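// Because of inlining, one physical compiled frame may represent several Java
// frames; count them by walking the scope descriptors starting at pc.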
125 static int num_java_frames(nmethod* nm, address pc) {
126 int count = 0;
127 for (ScopeDesc* scope = nm->scope_desc_at(pc); scope != nullptr; scope = scope->sender()) {
128 count++;
129 }
130 return count;
131 }
132
133 static int num_java_frames(const StackChunkFrameStream<ChunkFrames::Mixed>& f) {
134 assert(f.is_interpreted()
135 || (f.cb() != nullptr && f.cb()->is_nmethod() && f.cb()->as_nmethod()->is_java_method()), "");
136 return f.is_interpreted() ? 1 : num_java_frames(f.cb()->as_nmethod(), f.orig_pc());
137 }
138
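// Counts all Java frames in this chunk, including inlined frames and skipping
// stub frames.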
139 int stackChunkOopDesc::num_java_frames() const {
140 int n = 0;
141 for (StackChunkFrameStream<ChunkFrames::Mixed> f(const_cast<stackChunkOopDesc*>(this)); !f.is_done();
142 f.next(SmallRegisterMap::instance_no_args())) {
143 if (!f.is_stub()) {
144 n += ::num_java_frames(f);
145 }
146 }
147 return n;
148 }
149
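// Stack closure that forwards every visited frame to do_barriers0 with the
// requested barrier type.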
150 template <stackChunkOopDesc::BarrierType barrier>
151 class DoBarriersStackClosure {
152 const stackChunkOop _chunk;
153
154 public:
155 DoBarriersStackClosure(stackChunkOop chunk) : _chunk(chunk) {}
156
157 template <ChunkFrames frame_kind, typename RegisterMapT>
158 bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
159 _chunk->do_barriers0<barrier>(f, map);
160 return true;
161 }
162 };
398     // The entry barrier provides the synchronization needed to keep the
399     // nmethod alive while it may be executed concurrently.
400 nm->run_nmethod_entry_barrier();
401     // There is no need to mark the Method: class redefinition walks the
402     // CodeCache and notes the Methods of the nmethods it finds.
403 }
404
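  // Pick the closure specialization matching how oops are stored in the chunk:
  // as narrowOops when the chunk has a bitmap and compressed oops are in use,
  // as full-width oops otherwise.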
405 if (has_bitmap() && UseCompressedOops) {
406 BarrierClosure<barrier, true> oops_closure(f.sp());
407 f.iterate_oops(&oops_closure, map);
408 } else {
409 BarrierClosure<barrier, false> oops_closure(f.sp());
410 f.iterate_oops(&oops_closure, map);
411 }
412 }
413
414 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
415 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const RegisterMap* map);
416 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
417 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const RegisterMap* map);
418 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMapNoArgs* map);
419 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMapNoArgs* map);
420 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMapNoArgs* map);
421 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::CompiledOnly>& f, const SmallRegisterMapNoArgs* map);
422 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Load> (const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMapWithArgs* map);
423 template void stackChunkOopDesc::do_barriers0<stackChunkOopDesc::BarrierType::Store>(const StackChunkFrameStream<ChunkFrames::Mixed>& f, const SmallRegisterMapWithArgs* map);
424
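// Fixes up a frame that has just been thawed onto the stack: decodes oops that
// are still in GC mode and, for compiled frames, converts relativized derived
// oops back into real pointers.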
425 template <typename RegisterMapT>
426 void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMapT* map) {
427 if (!(is_gc_mode() || requires_barriers())) {
428 return;
429 }
430
431 BarrierSetStackChunk* bs_chunk = BarrierSet::barrier_set()->barrier_set_stack_chunk();
432 FrameOopIterator<RegisterMapT> iterator(f, map);
433 bs_chunk->decode_gc_mode(this, &iterator);
434
435 if (f.is_compiled_frame() && f.oop_map()->has_derived_oops()) {
436 DerivedPointersSupport::DerelativizeClosure derived_closure;
437 OopMapDo<OopClosure, DerivedPointersSupport::DerelativizeClosure, SkipNullValue> visitor(nullptr, &derived_closure);
438 visitor.oops_do(&f, map, f.oop_map());
439 }
440 }
441
442 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const RegisterMap* map);
443 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMapNoArgs* map);
444 template void stackChunkOopDesc::fix_thawed_frame(const frame& f, const SmallRegisterMapWithArgs* map);
445
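// Transfers the oops of the frozen lock stack out of the chunk into dst; the
// source slots are cleared through the heap access API when GC barriers are
// required.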
446 void stackChunkOopDesc::transfer_lockstack(oop* dst, bool requires_barriers) {
447 const bool requires_gc_barriers = is_gc_mode() || requires_barriers;
448 const bool requires_uncompress = has_bitmap() && UseCompressedOops;
449 const auto load_and_clear_obj = [&](intptr_t* at) -> oop {
450 if (requires_gc_barriers) {
451 if (requires_uncompress) {
452 oop value = HeapAccess<>::oop_load(reinterpret_cast<narrowOop*>(at));
453 HeapAccess<>::oop_store(reinterpret_cast<narrowOop*>(at), nullptr);
454 return value;
455 } else {
456 oop value = HeapAccess<>::oop_load(reinterpret_cast<oop*>(at));
457 HeapAccess<>::oop_store(reinterpret_cast<oop*>(at), nullptr);
458 return value;
459 }
460 } else {
461 oop value = *reinterpret_cast<oop*>(at);
462 return value;
463 }
464 };
513 intptr_t* _sp;
514 CodeBlob* _cb;
515 bool _callee_interpreted;
516 int _size;
517 int _argsize;
518 int _num_oops;
519 int _num_frames;
520 int _num_interpreted_frames;
521 int _num_i2c;
522
523 VerifyStackChunkFrameClosure(stackChunkOop chunk)
524 : _chunk(chunk), _sp(nullptr), _cb(nullptr), _callee_interpreted(false),
525 _size(0), _argsize(0), _num_oops(0), _num_frames(0), _num_interpreted_frames(0), _num_i2c(0) {}
526
527 template <ChunkFrames frame_kind, typename RegisterMapT>
528 bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
529 _sp = f.sp();
530 _cb = f.cb();
531
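    // When this frame and its callee are of the same kind (both interpreted or
    // both compiled), the callee's stack-argument area overlaps this frame, so
    // it is subtracted to avoid double-counting.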
532 int fsize = f.frame_size() - ((f.is_interpreted() == _callee_interpreted) ? _argsize : 0);
533 int num_oops = f.num_oops(map);
534 assert(num_oops >= 0, "");
535
536 _argsize = f.stack_argsize() + frame::metadata_words_at_top;
537 _size += fsize;
538 _num_oops += num_oops;
539 if (f.is_interpreted()) {
540 _num_interpreted_frames++;
541 }
542
543 log_develop_trace(continuations)("debug_verify_stack_chunk frame: %d sp: " INTPTR_FORMAT " pc: " PTR_FORMAT " interpreted: %d size: %d argsize: %d oops: %d", _num_frames, f.sp() - _chunk->start_address(), p2i(f.pc()), f.is_interpreted(), fsize, _argsize, num_oops);
544 LogTarget(Trace, continuations) lt;
545 if (lt.develop_is_enabled()) {
546 LogStream ls(lt);
547 f.print_on(&ls);
548 }
549 assert(f.pc() != nullptr,
550 "young: %d num_frames: %d sp: " PTR_FORMAT " start: " PTR_FORMAT " end: " PTR_FORMAT,
551 !_chunk->requires_barriers(), _num_frames, p2i(f.sp()), p2i(_chunk->start_address()), p2i(_chunk->bottom_address()));
552
553 if (_num_frames == 0) {