/* Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_INSTANCESTACKCHUNKKLASS_INLINE_HPP
#define SHARE_OOPS_INSTANCESTACKCHUNKKLASS_INLINE_HPP

#include "oops/instanceStackChunkKlass.hpp"

#include "classfile/javaClasses.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.inline.hpp"
#include "code/nativeInst.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

#include CPU_HEADER_INLINE(instanceStackChunkKlass)

#if INCLUDE_ZGC
#include "gc/z/zAddress.inline.hpp"
#define FIX_DERIVED_POINTERS true
#endif
#if INCLUDE_SHENANDOAHGC
#define FIX_DERIVED_POINTERS true
#endif

#ifdef ASSERT
extern "C" bool dbg_is_safe(const void* p, intptr_t errvalue);
extern "C" JNIEXPORT void pns2();
#endif

const int TwoWordAlignmentMask  = (1 << (LogBytesPerWord+1)) - 1;

inline void copy_from_stack_to_chunk(intptr_t* from, intptr_t* to, int size);
inline void copy_from_chunk_to_stack(intptr_t* from, intptr_t* to, int size);

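// Word-granular copy helpers between a thread stack and a stack chunk.
// 'size' is in words, hence the shift by LogBytesPerWord. The specialized,
// alignment-aware copy paths below are currently disabled (#if 0); both
// directions fall back to a plain memcpy.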
template <bool dword_aligned>
inline void InstanceStackChunkKlass::copy_from_stack_to_chunk(void* from, void* to, size_t size) {
  memcpy(to, from, size << LogBytesPerWord);
#if 0
  if (dword_aligned) {
    assert (size >= 2, ""); // one word for return address, another for rbp spill
    assert(((intptr_t)from & TwoWordAlignmentMask) == 0, "");
    assert(((intptr_t)to   & WordAlignmentMask)    == 0, "");

    memcpy_fn_from_stack_to_chunk(from, to, size);
  } else {
    default_memcpy(from, to, size);
  }
#endif
}

template <bool dword_aligned>
inline void InstanceStackChunkKlass::copy_from_chunk_to_stack(void* from, void* to, size_t size) {
  memcpy(to, from, size << LogBytesPerWord);
#if 0
  if (dword_aligned) {
    assert (size >= 2, ""); // one word for return address, another for rbp spill
    assert(((intptr_t)from & WordAlignmentMask)    == 0, "");
    assert(((intptr_t)to   & TwoWordAlignmentMask) == 0, "");

    memcpy_fn_from_chunk_to_stack(from, to, size);
  } else {
    default_memcpy(from, to, size);
  }
#endif
}

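// StackChunkFrameStream walks the physical frames stored in a stackChunkOop,
// from the youngest frame towards the chunk's bottom. The 'mixed' template
// parameter must be true if the chunk may contain interpreted or stub frames
// in addition to compiled frames; when false a cheaper, compiled-only walk is
// assumed (see the assert below).
//
// Typical walk (sketch, with 'map' a RegisterMap* or SmallRegisterMap*):
//   for (StackChunkFrameStream<true> fs(chunk); !fs.is_done(); fs.next(map)) {
//     ... fs.to_frame(), fs.iterate_oops(closure, map), ...
//   }
//
// This constructor positions the stream at the chunk's top (youngest) frame.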
template <bool mixed>
StackChunkFrameStream<mixed>::StackChunkFrameStream(stackChunkOop chunk, bool gc) DEBUG_ONLY(: _chunk(chunk)) {
  assert (chunk->is_stackChunk(), "");
  assert (mixed || !chunk->has_mixed_frames(), "");

  DEBUG_ONLY(_index = 0;)
  _end = chunk->bottom_address();
  _sp = chunk->start_address() + get_initial_sp(chunk, gc);
  assert (_sp <= chunk->end_address() + InstanceStackChunkKlass::metadata_words(), "");

  get_cb();

  if (mixed) {
    if (!is_done() && is_interpreted()) {
      _unextended_sp = unextended_sp_for_interpreter_frame();
    } else {
      _unextended_sp = _sp;
    }
    assert (_unextended_sp >= _sp - InstanceStackChunkKlass::metadata_words(), "");
    // else if (is_compiled()) {
    //   tty->print_cr(">>>>> XXXX"); os::print_location(tty, (intptr_t)nativeCall_before(pc())->destination());
    //   assert (NativeCall::is_call_before(pc()) && nativeCall_before(pc()) != nullptr && nativeCall_before(pc())->destination() != nullptr, "");
    //   if (Interpreter::contains(nativeCall_before(pc())->destination())) { // interpreted callee
    //     _unextended_sp = unextended_sp_for_interpreter_frame_caller();
    //   }
    // }
  }
  DEBUG_ONLY(else _unextended_sp = nullptr;)

  if (is_stub()) {
    get_oopmap(pc(), 0);
    DEBUG_ONLY(_has_stub = true);
  } DEBUG_ONLY(else _has_stub = false;)
}

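// Positions the stream at a given frame 'f', which must lie within 'chunk'.
// If 'f' already carries its code blob, it is reused instead of doing a
// code-cache lookup.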
template <bool mixed>
StackChunkFrameStream<mixed>::StackChunkFrameStream(stackChunkOop chunk, const frame& f) DEBUG_ONLY(: _chunk(chunk)) {
  assert (chunk->is_stackChunk(), "");
  assert (mixed || !chunk->has_mixed_frames(), "");

  DEBUG_ONLY(_index = 0;)

  _end = chunk->bottom_address();

  assert (chunk->is_in_chunk(f.sp()), "");
  _sp = f.sp();
  if (mixed) {
    _unextended_sp = f.unextended_sp();
    assert (_unextended_sp >= _sp - InstanceStackChunkKlass::metadata_words(), "");
  }
  DEBUG_ONLY(else _unextended_sp = nullptr;)
  assert (_sp >= chunk->start_address() && _sp <= chunk->end_address() + InstanceStackChunkKlass::metadata_words(), "");

  if (f.cb() != nullptr) {
    _oopmap = nullptr;
    _cb = f.cb();
  } else {
    get_cb();
  }

  if (is_stub()) {
    get_oopmap(pc(), 0);
    DEBUG_ONLY(_has_stub = true);
  } DEBUG_ONLY(else _has_stub = false;)
}

template <bool mixed>
inline bool StackChunkFrameStream<mixed>::is_stub() const {
  return cb() != nullptr && (_cb->is_safepoint_stub() || _cb->is_runtime_stub());
}

template <bool mixed>
inline bool StackChunkFrameStream<mixed>::is_compiled() const {
  return cb() != nullptr && _cb->is_compiled();
}

template <bool mixed>
inline bool StackChunkFrameStream<mixed>::is_interpreted() const {
  return mixed ? (!is_done() && Interpreter::contains(pc())) : false;
}

template <bool mixed>
inline int StackChunkFrameStream<mixed>::frame_size() const {
  return is_interpreted() ? interpreter_frame_size()
                          : cb()->frame_size() + stack_argsize();
}

template <bool mixed>
inline int StackChunkFrameStream<mixed>::stack_argsize() const {
  if (is_interpreted()) return interpreter_frame_stack_argsize();
  if (is_stub()) return 0;
  guarantee (cb() != nullptr, "");
  guarantee (cb()->is_compiled(), "");
  guarantee (cb()->as_compiled_method()->method() != nullptr, "");
  return (cb()->as_compiled_method()->method()->num_stack_arg_slots() * VMRegImpl::stack_slot_size) >> LogBytesPerWord;
}

template <bool mixed>
inline int StackChunkFrameStream<mixed>::num_oops() const {
  return is_interpreted() ? interpreter_frame_num_oops() : oopmap()->num_oops();
}

template <bool mixed>
inline void StackChunkFrameStream<mixed>::initialize_register_map(RegisterMap* map) {
  update_reg_map_pd(map);
}

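// Advances the stream to the current frame's caller. Also updates 'map' so
// that register locations recorded while processing this frame (the saved
// link, and callee-saved registers spilled by a safepoint/runtime stub) can
// still be resolved in the caller.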
template <bool mixed>
template <typename RegisterMapT>
inline void StackChunkFrameStream<mixed>::next(RegisterMapT* map) {
  update_reg_map(map);
  bool safepoint = is_stub();
  if (mixed) {
    if (is_interpreted()) next_for_interpreter_frame();
    else {
      _sp = _unextended_sp + cb()->frame_size();
      if (_sp >= _end - InstanceStackChunkKlass::metadata_words()) {
        _sp = _end;
      }
      _unextended_sp = is_interpreted() ? unextended_sp_for_interpreter_frame() : _sp;
    }
    assert (_unextended_sp >= _sp - InstanceStackChunkKlass::metadata_words(), "");
  } else {
    _sp += cb()->frame_size();
  }
  assert (!is_interpreted() || _unextended_sp == unextended_sp_for_interpreter_frame(), "_unextended_sp: " INTPTR_FORMAT " unextended_sp: " INTPTR_FORMAT, p2i(_unextended_sp), p2i(unextended_sp_for_interpreter_frame()));

  get_cb();
  update_reg_map_pd(map);
  if (safepoint && cb() != nullptr) _oopmap = cb()->oop_map_for_return_address(pc()); // there's no post-call nop and no fast oopmap lookup
  DEBUG_ONLY(_index++;)
}

template <bool mixed>
inline intptr_t* StackChunkFrameStream<mixed>::next_sp() const {
  return is_interpreted() ? next_sp_for_interpreter_frame() : unextended_sp() + cb()->frame_size();
}

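// Looks up the code blob for the current pc and resets the cached oop map.
// Interpreted frames (and a finished stream) have no code blob.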
template <bool mixed>
inline void StackChunkFrameStream<mixed>::get_cb() {
  _oopmap = nullptr;
  if (is_done() || is_interpreted()) {
    _cb = nullptr;
    return;
  }

  assert (pc() != nullptr && dbg_is_safe(pc(), -1),
    "index: %d sp: " INTPTR_FORMAT " sp offset: %d end offset: %d size: %d chunk sp: %d",
    _index, p2i(sp()), _chunk->to_offset(sp()), _chunk->to_offset(_chunk->bottom_address()), _chunk->stack_size(), _chunk->sp());

  _cb = CodeCache::find_blob_fast(pc());

  // if (_cb == nullptr) { tty->print_cr("OOPS"); os::print_location(tty, (intptr_t)pc()); }
  assert (_cb != nullptr,
    "index: %d sp: " INTPTR_FORMAT " sp offset: %d end offset: %d size: %d chunk sp: %d gc_flag: %d",
    _index, p2i(sp()), _chunk->to_offset(sp()), _chunk->to_offset(_chunk->bottom_address()), _chunk->stack_size(), _chunk->sp(), _chunk->is_gc_mode());
  assert (is_interpreted() || ((is_stub() || is_compiled()) && _cb->frame_size() > 0),
    "index: %d sp: " INTPTR_FORMAT " sp offset: %d end offset: %d size: %d chunk sp: %d is_stub: %d is_compiled: %d frame_size: %d mixed: %d",
    _index, p2i(sp()), _chunk->to_offset(sp()), _chunk->to_offset(_chunk->bottom_address()), _chunk->stack_size(), _chunk->sp(), is_stub(), is_compiled(), _cb->frame_size(), mixed);
}

template <bool mixed>
inline void StackChunkFrameStream<mixed>::get_oopmap() const {
  if (is_interpreted()) return;
  assert (is_compiled(), "");
  get_oopmap(pc(), CodeCache::find_oopmap_slot_fast(pc()));
}

template <bool mixed>
inline void StackChunkFrameStream<mixed>::get_oopmap(address pc, int oopmap_slot) const {
  assert (cb() != nullptr, "");
  assert (!is_compiled() || !cb()->as_compiled_method()->is_deopt_pc(pc), "oopmap_slot: %d", oopmap_slot);
  if (oopmap_slot >= 0) {
    assert (cb()->oop_map_for_slot(oopmap_slot, pc) != nullptr, "");
    assert (cb()->oop_map_for_slot(oopmap_slot, pc) == cb()->oop_map_for_return_address(pc), "");

    _oopmap = cb()->oop_map_for_slot(oopmap_slot, pc);
  } else {
    _oopmap = cb()->oop_map_for_return_address(pc);
  }
  assert (_oopmap != nullptr, "");
}

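// Returns the sp offset (in words from the chunk's stack base) at which the
// walk starts. Currently this is simply chunk->sp(); the disabled code below
// would instead use the gcSP field so that already-thawed frames are also
// traversed while marking is in progress.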
template <bool mixed>
inline int StackChunkFrameStream<mixed>::get_initial_sp(stackChunkOop chunk, bool gc) {
  int chunk_sp = chunk->sp();
  // we don't invoke write barriers on oops in thawed frames, so we use the gcSP field to traverse thawed frames
  // if (gc && chunk_sp != chunk->gc_sp() && chunk->requires_barriers()) {
  //   uint64_t marking_cycle = CodeCache::marking_cycle() >> 1;
  //   uint64_t chunk_marking_cycle = chunk->mark_cycle() >> 1;
  //   if (marking_cycle == chunk_marking_cycle) {
  //     // Marking isn't finished, so we need to traverse thawed frames
  //     chunk_sp = chunk->gc_sp();
  //     assert (chunk_sp >= 0 && chunk_sp <= chunk->sp(), "");
  //   } else {
  //     chunk->set_gc_sp(chunk_sp); // atomic; benign race
  //   }
  // }
  assert (chunk_sp >= 0, "");
  return chunk_sp;
}

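// Translates an oop-map register into an address within the chunk: a physical
// register is resolved through the register map (its contents were recorded
// there, e.g. the saved link or a stub's spill area), while a stack slot is
// resolved relative to the frame's unextended sp.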
template <bool mixed>
template <typename RegisterMapT>
inline void* StackChunkFrameStream<mixed>::reg_to_loc(VMReg reg, const RegisterMapT* map) const {
  assert (!is_done(), "");
  return reg->is_reg() ? (void*)map->location(reg, sp()) // see frame::update_map_with_saved_link(&map, link_addr);
                       : (void*)((address)unextended_sp() + (reg->reg2stack() * VMRegImpl::stack_slot_size));
}

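// update_reg_map records, in a full RegisterMap, the locations of callee-saved
// registers spilled by the current frame so that the caller's oop map can
// refer to them. Only the RegisterMap specializations do any work; for other
// register-map types (e.g. SmallRegisterMap) it is a no-op.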
template<>
template<>
inline void StackChunkFrameStream<true>::update_reg_map(RegisterMap* map) {
  assert (!map->in_cont() || map->stack_chunk() == _chunk, "");
  if (map->update_map() && is_stub()) {
    frame f = to_frame();
    oopmap()->update_register_map(&f, map); // we have callee-save registers in this case
  }
}

template<>
template<>
inline void StackChunkFrameStream<false>::update_reg_map(RegisterMap* map) {
  assert (map->in_cont() && map->stack_chunk()() == _chunk, "");
  if (map->update_map()) {
    frame f = to_frame();
    oopmap()->update_register_map(&f, map); // we have callee-save registers in this case
  }
}

template <bool mixed>
template <typename RegisterMapT>
inline void StackChunkFrameStream<mixed>::update_reg_map(RegisterMapT* map) {}

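// If the current compiled frame has been deoptimized, returns the original pc
// saved in the frame (at the nmethod's orig-pc offset); otherwise returns pc().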
template <bool mixed>
inline address StackChunkFrameStream<mixed>::orig_pc() const {
  address pc1 = pc();
  if (is_interpreted() || is_stub()) return pc1;
  CompiledMethod* cm = cb()->as_compiled_method();
  if (cm->is_deopt_pc(pc1)) {
    pc1 = *(address*)((address)unextended_sp() + cm->orig_pc_offset());
  }

  assert (pc1 != nullptr && !cm->is_deopt_pc(pc1),
          "index: %d sp - start: " INTPTR_FORMAT " end - sp: " INTPTR_FORMAT " size: %d sp: %d",
          _index, sp() - _chunk->sp_address(), end() - sp(), _chunk->stack_size(), _chunk->sp());
  assert (_cb == CodeCache::find_blob_fast(pc1), "");

  return pc1;
}

#ifdef ASSERT
template <bool mixed>
bool StackChunkFrameStream<mixed>::is_deoptimized() const {
  address pc1 = pc();
  return is_compiled() && CodeCache::find_oopmap_slot_fast(pc1) < 0 && cb()->as_compiled_method()->is_deopt_pc(pc1);
}
#endif

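// Makes sure _oopmap is set for the current compiled frame, falling back to the
// original pc if the frame was marked for deoptimization while frozen.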
template <bool mixed>
void StackChunkFrameStream<mixed>::handle_deopted() const {
  assert (!is_done(), "");

  if (_oopmap != nullptr) return;
  if (is_interpreted()) return;
  assert (is_compiled(), "");

  address pc1 = pc();
  int oopmap_slot = CodeCache::find_oopmap_slot_fast(pc1);
  if (UNLIKELY(oopmap_slot < 0)) { // we could have marked frames for deoptimization in thaw_chunk
    if (cb()->as_compiled_method()->is_deopt_pc(pc1)) {
      pc1 = orig_pc();
      oopmap_slot = CodeCache::find_oopmap_slot_fast(pc1);
    }
  }
  get_oopmap(pc1, oopmap_slot);
}

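// Applies 'closure' to every oop (or narrow oop) location in the current frame:
// interpreted frames are walked via frame::oops_interpreted_do, compiled and
// stub frames via their oop map, with locations resolved through reg_to_loc.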
template <bool mixed>
template <class OopClosureType, class RegisterMapT>
inline void StackChunkFrameStream<mixed>::iterate_oops(OopClosureType* closure, const RegisterMapT* map) const {
  if (is_interpreted()) {
    frame f = to_frame();
    // InterpreterOopMap mask;
    // f.interpreted_frame_oop_map(&mask);
    f.oops_interpreted_do<true>(closure, nullptr, true);
  } else {
    DEBUG_ONLY(int oops = 0;)
    for (OopMapStream oms(oopmap()); !oms.is_done(); oms.next()) { // see void OopMapDo<OopFnT, DerivedOopFnT, ValueFilterT>::iterate_oops_do
      OopMapValue omv = oms.current();
      if (omv.type() != OopMapValue::oop_value && omv.type() != OopMapValue::narrowoop_value) {
        continue;
      }

      assert (UseCompressedOops || omv.type() == OopMapValue::oop_value, "");
      DEBUG_ONLY(oops++;)

      void* p = reg_to_loc(omv.reg(), map);
      assert (p != nullptr, "");
      assert ((_has_stub && _index == 1) || is_in_frame(p), "");

      // if ((intptr_t*)p >= end) continue; // we could be walking the bottom frame's stack-passed args, belonging to the caller

      // if (!SkipNullValue::should_skip(*p))
      log_develop_trace(jvmcont)("StackChunkFrameStream::iterate_oops narrow: %d reg: %s p: " INTPTR_FORMAT " sp offset: " INTPTR_FORMAT, omv.type() == OopMapValue::narrowoop_value, omv.reg()->name(), p2i(p), (intptr_t*)p - sp());
      if (omv.type() == OopMapValue::narrowoop_value) {
        Devirtualizer::do_oop(closure, (narrowOop*)p);
      } else {
        Devirtualizer::do_oop(closure, (oop*)p);
      }
    }
    assert (oops == oopmap()->num_oops(), "oops: %d oopmap->num_oops(): %d", oops, oopmap()->num_oops());
  }
}

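// Visits every derived-pointer entry in the current frame's oop map, passing the
// base oop location and the derived location to 'closure' (which typically turns
// the derived pointer into an offset before the base oop is moved by the GC).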
template<bool mixed>
template <class DerivedOopClosureType, class RegisterMapT>
inline void StackChunkFrameStream<mixed>::iterate_derived_pointers(DerivedOopClosureType* closure, const RegisterMapT* map) const {
  if (is_interpreted()) return;

  for (OopMapStream oms(oopmap()); !oms.is_done(); oms.next()) {
    OopMapValue omv = oms.current();
    if (omv.type() != OopMapValue::derived_oop_value) {
      continue;
    }

    intptr_t* derived_loc = (intptr_t*)reg_to_loc(omv.reg(), map);
    intptr_t* base_loc    = (intptr_t*)reg_to_loc(omv.content_reg(), map); // see OopMapDo<OopMapFnT, DerivedOopFnT, ValueFilterT>::walk_derived_pointers1

    assert ((_has_stub && _index == 1) || is_in_frame(base_loc), "");
    assert ((_has_stub && _index == 1) || is_in_frame(derived_loc), "");
    assert (derived_loc != base_loc, "Base and derived in same location");
    assert (is_in_oops(base_loc, map), "not found: " INTPTR_FORMAT, p2i(base_loc));
    assert (!is_in_oops(derived_loc, map), "found: " INTPTR_FORMAT, p2i(derived_loc));

    Devirtualizer::do_derived_oop(closure, (oop*)base_loc, (derived_pointer*)derived_loc);
  }
  OrderAccess::storestore(); // to preserve that we set the offset *before* fixing the base oop
}

#ifdef ASSERT

template <bool mixed>
template <typename RegisterMapT>
bool StackChunkFrameStream<mixed>::is_in_oops(void* p, const RegisterMapT* map) const {
  for (OopMapStream oms(oopmap()); !oms.is_done(); oms.next()) {
    if (oms.current().type() != OopMapValue::oop_value) {
      continue;
    }
    if (reg_to_loc(oms.current().reg(), map) == p) {
      return true;
    }
  }
  return false;
}
#endif

#ifdef ASSERT
void InstanceStackChunkKlass::assert_mixed_correct(stackChunkOop chunk, bool mixed) {
  assert (!chunk->has_mixed_frames() || mixed, "has mixed frames: %d mixed: %d", chunk->has_mixed_frames(), mixed);
}
#endif

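// A stack chunk's object size: the Java fields (size_helper()), the stack area
// itself, and, when enabled, the oop bitmap that follows the stack.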
inline int InstanceStackChunkKlass::instance_size(int stack_size_in_words) const {
  return align_object_size(size_helper() + stack_size_in_words + bitmap_size(stack_size_in_words));
}

inline HeapWord* InstanceStackChunkKlass::start_of_bitmap(oop obj) {
  return start_of_stack(obj) + jdk_internal_vm_StackChunk::size(obj);
}

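// Size, in words, of the oop bitmap for a stack of the given size: one bit per
// potential (narrow) oop slot, rounded up to a whole number of words.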
inline int InstanceStackChunkKlass::bitmap_size(int stack_size_in_words) {
  if (!UseChunkBitmaps) return 0;
  int size_in_bits = bitmap_size_in_bits(stack_size_in_words);
  static const int mask = BitsPerWord - 1;
  int remainder = (size_in_bits & mask) != 0 ? 1 : 0;
  int res = (size_in_bits >> LogBitsPerWord) + remainder;
  assert (size_in_bits + (int)bit_offset(stack_size_in_words) == (res << LogBitsPerWord), "size_in_bits: %d bit_offset: %d res << LogBitsPerWord: %d", size_in_bits, (int)bit_offset(stack_size_in_words), (res << LogBitsPerWord));
  return res;
}

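// Number of unused padding bits at the start of the word-rounded bitmap, i.e.
// the index of the first bit that actually corresponds to a stack slot (see
// the assert in bitmap_size()).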
inline BitMap::idx_t InstanceStackChunkKlass::bit_offset(int stack_size_in_words) {
  static const int mask = BitsPerWord - 1;
  // tty->print_cr(">>> BitsPerWord: %d MASK: %d stack_size_in_words: %d stack_size_in_words & mask: %d", BitsPerWord, mask, stack_size_in_words, stack_size_in_words & mask);
  return (BitMap::idx_t)((BitsPerWord - (bitmap_size_in_bits(stack_size_in_words) & mask)) & mask);
}

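// GC oop iteration entry points. Each variant visits the chunk's oop fields
// (parent and cont) via oop_oop_iterate_header* and the oops stored in the
// frozen frames via oop_oop_iterate_stack*. oop_oop_iterate and
// oop_oop_iterate_reverse pick the concurrent-GC flavour of the stack walk when
// ZGC or Shenandoah is in use; the bounded variant restricts iteration to 'mr'.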
template <typename T, class OopClosureType>
void InstanceStackChunkKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
  assert (obj->is_stackChunk(), "");
  stackChunkOop chunk = (stackChunkOop)obj;
  if (Devirtualizer::do_metadata(closure)) {
    Devirtualizer::do_klass(closure, this);
  }
  (UseZGC || UseShenandoahGC)
    ? oop_oop_iterate_stack<true,  OopClosureType>(chunk, closure)
    : oop_oop_iterate_stack<false, OopClosureType>(chunk, closure);
  oop_oop_iterate_header<T>(chunk, closure);
}

template <typename T, class OopClosureType>
void InstanceStackChunkKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
  assert (obj->is_stackChunk(), "");
  assert(!Devirtualizer::do_metadata(closure), "Code to handle metadata is not implemented");
  stackChunkOop chunk = (stackChunkOop)obj;
  (UseZGC || UseShenandoahGC)
    ? oop_oop_iterate_stack<true,  OopClosureType>(chunk, closure)
    : oop_oop_iterate_stack<false, OopClosureType>(chunk, closure);
  oop_oop_iterate_header<T>(chunk, closure);
}

template <typename T, class OopClosureType>
void InstanceStackChunkKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  assert (obj->is_stackChunk(), "");
  stackChunkOop chunk = (stackChunkOop)obj;
  if (Devirtualizer::do_metadata(closure)) {
    if (mr.contains(obj)) {
      Devirtualizer::do_klass(closure, this);
    }
  }
  // InstanceKlass::oop_oop_iterate_bounded<T>(obj, closure, mr);
  oop_oop_iterate_stack_bounded<false>(chunk, closure, mr);
  oop_oop_iterate_header_bounded<T>(chunk, closure, mr);
}

template <typename T, class OopClosureType>
void InstanceStackChunkKlass::oop_oop_iterate_header(stackChunkOop chunk, OopClosureType* closure) {
  T* parent_addr = chunk->field_addr<T>(jdk_internal_vm_StackChunk::parent_offset());
  T* cont_addr = chunk->field_addr<T>(jdk_internal_vm_StackChunk::cont_offset());
  OrderAccess::storestore();
  Devirtualizer::do_oop(closure, parent_addr);
  OrderAccess::storestore();
  Devirtualizer::do_oop(closure, cont_addr); // must be last oop iterated
}

template <typename T, class OopClosureType>
void InstanceStackChunkKlass::oop_oop_iterate_header_bounded(stackChunkOop chunk, OopClosureType* closure, MemRegion mr) {
  T* parent_addr = chunk->field_addr<T>(jdk_internal_vm_StackChunk::parent_offset());
  T* cont_addr = chunk->field_addr<T>(jdk_internal_vm_StackChunk::cont_offset());
  if (mr.contains(parent_addr)) {
    OrderAccess::storestore();
    Devirtualizer::do_oop(closure, parent_addr);
  }
  if (mr.contains(cont_addr)) {
    OrderAccess::storestore();
    Devirtualizer::do_oop(closure, cont_addr); // must be last oop iterated
  }
}

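// Stack iteration: when the chunk has an oop bitmap, oops are found by scanning
// the bitmap between the current sp (minus the frame metadata words) and the end
// of the stack; otherwise the slow path walks the frames and their oop maps.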
template <bool concurrent_gc, class OopClosureType>
void InstanceStackChunkKlass::oop_oop_iterate_stack_bounded(stackChunkOop chunk, OopClosureType* closure, MemRegion mr) {
  if (LIKELY(chunk->has_bitmap())) {
    intptr_t* start = chunk->sp_address() - metadata_words();
    intptr_t* end = chunk->end_address();
    if ((intptr_t*)mr.start() > start) start = (intptr_t*)mr.start();
    if ((intptr_t*)mr.end()   < end)   end   = (intptr_t*)mr.end();
    oop_oop_iterate_stack_helper(chunk, closure, start, end);
  } else {
    oop_oop_iterate_stack_slow<concurrent_gc>(chunk, closure, mr);
  }
}

template <bool concurrent_gc, class OopClosureType>
void InstanceStackChunkKlass::oop_oop_iterate_stack(stackChunkOop chunk, OopClosureType* closure) {
  if (LIKELY(chunk->has_bitmap())) {
    oop_oop_iterate_stack_helper(chunk, closure, chunk->sp_address() - metadata_words(), chunk->end_address());
  } else {
    oop_oop_iterate_stack_slow<concurrent_gc>(chunk, closure, chunk->range());
  }
}

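// BitMapClosure that forwards every set bit in the chunk's oop bitmap to the
// oop closure, translating the bit index back into an oop/narrowOop address.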
template <typename OopT, typename OopClosureType>
class StackChunkOopIterateBitmapClosure : public BitMapClosure {
  stackChunkOop _chunk;
  OopClosureType* const _closure;
public:
  StackChunkOopIterateBitmapClosure(stackChunkOop chunk, OopClosureType* closure) : _chunk(chunk), _closure(closure) {}
  bool do_bit(BitMap::idx_t index) override {
    Devirtualizer::do_oop(_closure, _chunk->address_for_bit<OopT>(index));
    return true;
  }
};

template <class OopClosureType>
void InstanceStackChunkKlass::oop_oop_iterate_stack_helper(stackChunkOop chunk, OopClosureType* closure, intptr_t* start, intptr_t* end) {
  if (Devirtualizer::do_metadata(closure)) {
    mark_methods(chunk, closure);
  }

  if (UseCompressedOops) {
    StackChunkOopIterateBitmapClosure<narrowOop, OopClosureType> bitmap_closure(chunk, closure);
    chunk->bitmap().iterate(&bitmap_closure, chunk->bit_index_for((narrowOop*)start), chunk->bit_index_for((narrowOop*)end));
  } else {
    StackChunkOopIterateBitmapClosure<oop, OopClosureType> bitmap_closure(chunk, closure);
    chunk->bitmap().iterate(&bitmap_closure, chunk->bit_index_for((oop*)start), chunk->bit_index_for((oop*)end));
  }
}

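// Walks the frames of 'obj', invoking closure->do_frame() for each one until the
// closure asks to stop. A safepoint/runtime stub at the top is handled specially:
// a full RegisterMap is built so that the caller frame's oop map can resolve oops
// held in callee-saved registers spilled by the stub.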
template <bool mixed, class StackChunkFrameClosureType>
inline void InstanceStackChunkKlass::iterate_stack(stackChunkOop obj, StackChunkFrameClosureType* closure) {
  // log_develop_trace(jvmcont)("stackChunkOopDesc::iterate_stack this: " INTPTR_FORMAT " mixed: %d", p2i(this), mixed);

  const SmallRegisterMap* map = SmallRegisterMap::instance;
  assert (!map->in_cont(), "");

  StackChunkFrameStream<mixed> f(obj);
  // if (f.end() > h) {
  //   // log_develop_trace(jvmcont)("stackChunkOopDesc::iterate_stack this: " INTPTR_FORMAT " bounded", p2i(this));
  //   f.set_end(h);
  // }
  bool should_continue = true;

  if (f.is_stub()) {
    // log_develop_trace(jvmcont)("stackChunkOopDesc::iterate_stack this: " INTPTR_FORMAT " safepoint yield stub frame: %d", p2i(this), f.index());
    // if (log_develop_is_enabled(Trace, jvmcont)) f.print_on(tty);

    RegisterMap full_map((JavaThread*)nullptr, true, false, true);
    full_map.set_include_argument_oops(false);

    f.next(&full_map);

    // log_develop_trace(jvmcont)("stackChunkOopDesc::iterate_stack this: " INTPTR_FORMAT " safepoint yield caller frame: %d", p2i(this), f.index());

    assert (!f.is_done(), "");
    assert (f.is_compiled(), "");

    // if (f.sp() + f.frame_size() >= l) {
      // log_develop_trace(jvmcont)("stackChunkOopDesc::iterate_stack this: " INTPTR_FORMAT " stub-caller frame: %d", p2i(this), f.index());
      // if (log_develop_is_enabled(Trace, jvmcont)) f.print_on(tty);

      should_continue = closure->template do_frame<mixed>((const StackChunkFrameStream<mixed>&)f, &full_map);
    // }
    f.next(map);
  }
  assert (!f.is_stub(), "");

  for (; should_continue && !f.is_done(); f.next(map)) {
    // log_develop_trace(jvmcont)("stackChunkOopDesc::iterate_stack this: " INTPTR_FORMAT " frame: %d interpreted: %d", p2i(this), f.index(), f.is_interpreted());
    // if (log_develop_is_enabled(Trace, jvmcont)) f.print_on(tty);
    if (mixed) f.handle_deopted(); // in slow mode we might freeze deoptimized frames
    should_continue = closure->template do_frame<mixed>((const StackChunkFrameStream<mixed>&)f, map);
    // if (!should_continue) log_develop_trace(jvmcont)("stackChunkOopDesc::iterate_stack this: " INTPTR_FORMAT " stop", p2i(this));
  }
  // log_develop_trace(jvmcont)("stackChunkOopDesc::iterate_stack this: " INTPTR_FORMAT " done index: %d", p2i(this), f.index());
}

#endif // SHARE_OOPS_INSTANCESTACKCHUNKKLASS_INLINE_HPP