src/hotspot/share/oops/stackChunkOop.inline.hpp (old version)

  1 /*
  2  * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
 26 #define SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
 27 
 28 #include "oops/stackChunkOop.hpp"
 29 
 30 #include "gc/shared/collectedHeap.hpp"
 31 #include "gc/shared/barrierSet.hpp"
 32 #include "gc/shared/barrierSetStackChunk.hpp"
 33 #include "gc/shared/gc_globals.hpp"
 34 #include "memory/memRegion.hpp"
 35 #include "memory/universe.hpp"
 36 #include "oops/access.inline.hpp"
 37 #include "oops/instanceStackChunkKlass.inline.hpp"
 38 #include "runtime/continuationJavaClasses.inline.hpp"
 39 #include "runtime/frame.inline.hpp"
 40 #include "runtime/globals.hpp"
 41 #include "runtime/handles.inline.hpp"
 42 #include "runtime/registerMap.hpp"
 43 #include "runtime/smallRegisterMap.inline.hpp"
 44 #include "utilities/macros.hpp"
 45 #include CPU_HEADER_INLINE(stackChunkOop)
 46 
 47 DEF_HANDLE_CONSTR(stackChunk, is_stackChunk_noinline)
 48 
 49 inline stackChunkOop stackChunkOopDesc::cast(oop obj) {
 50   assert(obj == nullptr || obj->is_stackChunk(), "Wrong type");
 51   return stackChunkOop(obj);
 52 }
 53 
 54 inline stackChunkOop stackChunkOopDesc::parent() const         { return stackChunkOopDesc::cast(jdk_internal_vm_StackChunk::parent(as_oop())); }
 55 inline void stackChunkOopDesc::set_parent(stackChunkOop value) { jdk_internal_vm_StackChunk::set_parent(this, value); }
 56 template<typename P>
 57 inline void stackChunkOopDesc::set_parent_raw(oop value)       { jdk_internal_vm_StackChunk::set_parent_raw<P>(this, value); }
 58 template<DecoratorSet decorators>
 59 inline void stackChunkOopDesc::set_parent_access(oop value)    { jdk_internal_vm_StackChunk::set_parent_access<decorators>(this, value); }
 60 
 61 inline int stackChunkOopDesc::stack_size() const        { return jdk_internal_vm_StackChunk::size(as_oop()); }

 71 
 72 inline uint8_t stackChunkOopDesc::flags() const         { return jdk_internal_vm_StackChunk::flags(as_oop()); }
 73 inline void stackChunkOopDesc::set_flags(uint8_t value) { jdk_internal_vm_StackChunk::set_flags(this, value); }
 74 
 75 inline uint8_t stackChunkOopDesc::flags_acquire() const { return jdk_internal_vm_StackChunk::flags_acquire(as_oop()); }
 76 
 77 inline void stackChunkOopDesc::release_set_flags(uint8_t value) {
 78   jdk_internal_vm_StackChunk::release_set_flags(this, value);
 79 }
 80 
 81 inline bool stackChunkOopDesc::try_set_flags(uint8_t prev_flags, uint8_t new_flags) {
 82   return jdk_internal_vm_StackChunk::try_set_flags(this, prev_flags, new_flags);
 83 }
 84 
 85 inline int stackChunkOopDesc::max_thawing_size() const          { return jdk_internal_vm_StackChunk::maxThawingSize(as_oop()); }
 86 inline void stackChunkOopDesc::set_max_thawing_size(int value)  {
 87   assert(value >= 0, "size must be >= 0");
 88   jdk_internal_vm_StackChunk::set_maxThawingSize(this, (jint)value);
 89 }
 90 
 91 inline oop stackChunkOopDesc::cont() const                {
 92   if (UseZGC && !ZGenerational) {
 93     assert(!UseCompressedOops, "Non-generational ZGC does not support compressed oops");
 94     // The state of the cont oop is used by XCollectedHeap::requires_barriers
 95     // to determine the age of the stackChunkOopDesc. For that to work, only
 96     // the GC is allowed to perform a load barrier on the oop itself.
 97     // This function is used by non-GC code and therefore creates a stack-local
 98     // copy of the oop and performs the load barrier on that copy instead.
 99     oop obj = jdk_internal_vm_StackChunk::cont_raw<oop>(as_oop());
100     obj = (oop)NativeAccess<>::oop_load(&obj);
101     return obj;
102   }
103   return jdk_internal_vm_StackChunk::cont(as_oop());
104 }
105 inline void stackChunkOopDesc::set_cont(oop value)        { jdk_internal_vm_StackChunk::set_cont(this, value); }
106 template<typename P>
107 inline void stackChunkOopDesc::set_cont_raw(oop value)    { jdk_internal_vm_StackChunk::set_cont_raw<P>(this, value); }
108 template<DecoratorSet decorators>
109 inline void stackChunkOopDesc::set_cont_access(oop value) { jdk_internal_vm_StackChunk::set_cont_access<decorators>(this, value); }
110 

150   HeapWord* end = start + stack_size();
151   return (HeapWord*)p >= start && (HeapWord*)p < end;
152 }
153 
154 inline bool stackChunkOopDesc::is_flag(uint8_t flag) const {
155   return (flags() & flag) != 0;
156 }
157 inline bool stackChunkOopDesc::is_flag_acquire(uint8_t flag) const {
158   return (flags_acquire() & flag) != 0;
159 }
160 inline void stackChunkOopDesc::set_flag(uint8_t flag, bool value) {
161   uint32_t flags = this->flags();
162   set_flags((uint8_t)(value ? flags |= flag : flags &= ~flag));
163 }
164 inline void stackChunkOopDesc::clear_flags() {
165   set_flags(0);
166 }
167 
168 inline bool stackChunkOopDesc::has_mixed_frames() const { return is_flag(FLAG_HAS_INTERPRETED_FRAMES); }
169 inline void stackChunkOopDesc::set_has_mixed_frames(bool value) {
170   assert((flags() & ~FLAG_HAS_INTERPRETED_FRAMES) == 0, "other flags should not be set");
171   set_flag(FLAG_HAS_INTERPRETED_FRAMES, value);
172 }
173 
174 inline bool stackChunkOopDesc::is_gc_mode() const                  { return is_flag(FLAG_GC_MODE); }
175 inline bool stackChunkOopDesc::is_gc_mode_acquire() const          { return is_flag_acquire(FLAG_GC_MODE); }
176 inline void stackChunkOopDesc::set_gc_mode(bool value)             { set_flag(FLAG_GC_MODE, value); }
177 
178 inline bool stackChunkOopDesc::has_bitmap() const                  { return is_flag(FLAG_HAS_BITMAP); }
179 inline void stackChunkOopDesc::set_has_bitmap(bool value)          { set_flag(FLAG_HAS_BITMAP, value); }
180 
181 inline bool stackChunkOopDesc::has_thaw_slowpath_condition() const { return flags() != 0; }
182 
183 inline bool stackChunkOopDesc::requires_barriers() {
184   return Universe::heap()->requires_barriers(this);
185 }
186 
187 template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
188 void stackChunkOopDesc::do_barriers(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
189   if (frame_kind == ChunkFrames::Mixed) {
 190     // We may have frozen deoptimized frames in slow mode.
191     f.handle_deopted();
192   }
193   do_barriers0<barrier>(f, map);
194 }
195 
196 template <class StackChunkFrameClosureType>
197 inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
198   has_mixed_frames() ? iterate_stack<ChunkFrames::Mixed>(closure)
199                      : iterate_stack<ChunkFrames::CompiledOnly>(closure);
200 }
201 
202 template <ChunkFrames frame_kind, class StackChunkFrameClosureType>
203 inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
204   const SmallRegisterMap* map = SmallRegisterMap::instance();
205   assert(!map->in_cont(), "");
206 
207   StackChunkFrameStream<frame_kind> f(this);
208   bool should_continue = true;
209 
210   if (f.is_stub()) {
211     RegisterMap full_map(nullptr,
212                          RegisterMap::UpdateMap::include,
213                          RegisterMap::ProcessFrames::skip,
214                          RegisterMap::WalkContinuation::include);
215     full_map.set_include_argument_oops(false);
216 
217     f.next(&full_map);
218 
219     assert(!f.is_done(), "");
220     assert(f.is_compiled(), "");
221 
222     should_continue = closure->do_frame(f, &full_map);
223     f.next(map);
224     f.handle_deopted(); // the stub caller might be deoptimized (as it's not at a call)
225   }
226   assert(!f.is_stub(), "");
227 
228   for (; should_continue && !f.is_done(); f.next(map)) {
229     if (frame_kind == ChunkFrames::Mixed) {
230       // in slow mode we might freeze deoptimized frames
231       f.handle_deopted();
232     }
233     should_continue = closure->do_frame(f, map);
234   }
235 }
236 
237 inline frame stackChunkOopDesc::relativize(frame fr)   const { relativize_frame(fr);   return fr; }
238 inline frame stackChunkOopDesc::derelativize(frame fr) const { derelativize_frame(fr); return fr; }
239 
240 inline void* stackChunkOopDesc::gc_data() const {
241   int stack_sz = stack_size();
242   assert(stack_sz != 0, "stack should not be empty");
243 
244   // The gc data is located after the stack.

265 inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
266   assert(is_aligned(p, alignof(OopT)), "should be aligned: " PTR_FORMAT, p2i(p));
267   assert(p >= (OopT*)start_address(), "Address not in chunk");
268   return p - (OopT*)start_address();
269 }
270 
271 inline intptr_t* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
272   return UseCompressedOops ? (intptr_t*)address_for_bit<narrowOop>(index) : (intptr_t*)address_for_bit<oop>(index);
273 }
274 
275 template <typename OopT>
276 inline OopT* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
277   return (OopT*)start_address() + index;
278 }
279 
280 inline MemRegion stackChunkOopDesc::range() {
281   return MemRegion((HeapWord*)this, size());
282 }
283 
284 inline int stackChunkOopDesc::relativize_usp_offset(const frame& fr, const int usp_offset_in_bytes) const {
285   assert(fr.is_compiled_frame() || fr.cb()->is_safepoint_stub(), "");
286   assert(is_in_chunk(fr.unextended_sp()), "");
287 
288   intptr_t* base = fr.real_fp(); // equal to the caller's sp
289   intptr_t* loc = (intptr_t*)((address)fr.unextended_sp() + usp_offset_in_bytes);
290   assert(base > loc, "");
291   return (int)(base - loc);
292 }
293 
294 inline address stackChunkOopDesc::usp_offset_to_location(const frame& fr, const int usp_offset_in_bytes) const {
295   assert(fr.is_compiled_frame(), "");
296   return (address)derelativize_address(fr.offset_unextended_sp()) + usp_offset_in_bytes;
297 }
298 
299 inline address stackChunkOopDesc::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg) const {
300   assert(fr.is_compiled_frame(), "");
301   assert(map != nullptr, "");
302   assert(map->stack_chunk() == as_oop(), "");
303 
304   // the offsets are saved in the map after going through relativize_usp_offset, so they are sp - loc, in words
305   intptr_t offset = (intptr_t)map->location(reg, nullptr); // see usp_offset_to_index for the chunk case

src/hotspot/share/oops/stackChunkOop.inline.hpp (new version)

  1 /*
  2  * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #ifndef SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
 26 #define SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
 27 
 28 #include "oops/stackChunkOop.hpp"
 29 
 30 #include "gc/shared/collectedHeap.hpp"
 31 #include "gc/shared/barrierSet.hpp"
 32 #include "gc/shared/barrierSetStackChunk.hpp"
 33 #include "gc/shared/gc_globals.hpp"
 34 #include "memory/memRegion.hpp"
 35 #include "memory/universe.hpp"
 36 #include "oops/access.inline.hpp"
 37 #include "oops/instanceStackChunkKlass.inline.hpp"
 38 #include "runtime/continuationJavaClasses.inline.hpp"
 39 #include "runtime/frame.hpp"
 40 #include "runtime/globals.hpp"
 41 #include "runtime/handles.inline.hpp"
 42 #include "runtime/objectMonitor.hpp"
 43 #include "runtime/registerMap.hpp"
 44 #include "runtime/smallRegisterMap.inline.hpp"
 45 #include "utilities/macros.hpp"
 46 #include CPU_HEADER_INLINE(stackChunkOop)
 47 
 48 DEF_HANDLE_CONSTR(stackChunk, is_stackChunk_noinline)
 49 
 50 inline stackChunkOop stackChunkOopDesc::cast(oop obj) {
 51   assert(obj == nullptr || obj->is_stackChunk(), "Wrong type");
 52   return stackChunkOop(obj);
 53 }
 54 
 55 inline stackChunkOop stackChunkOopDesc::parent() const         { return stackChunkOopDesc::cast(jdk_internal_vm_StackChunk::parent(as_oop())); }
 56 inline void stackChunkOopDesc::set_parent(stackChunkOop value) { jdk_internal_vm_StackChunk::set_parent(this, value); }
 57 template<typename P>
 58 inline void stackChunkOopDesc::set_parent_raw(oop value)       { jdk_internal_vm_StackChunk::set_parent_raw<P>(this, value); }
 59 template<DecoratorSet decorators>
 60 inline void stackChunkOopDesc::set_parent_access(oop value)    { jdk_internal_vm_StackChunk::set_parent_access<decorators>(this, value); }
 61 
 62 inline int stackChunkOopDesc::stack_size() const        { return jdk_internal_vm_StackChunk::size(as_oop()); }

 72 
 73 inline uint8_t stackChunkOopDesc::flags() const         { return jdk_internal_vm_StackChunk::flags(as_oop()); }
 74 inline void stackChunkOopDesc::set_flags(uint8_t value) { jdk_internal_vm_StackChunk::set_flags(this, value); }
 75 
 76 inline uint8_t stackChunkOopDesc::flags_acquire() const { return jdk_internal_vm_StackChunk::flags_acquire(as_oop()); }
 77 
 78 inline void stackChunkOopDesc::release_set_flags(uint8_t value) {
 79   jdk_internal_vm_StackChunk::release_set_flags(this, value);
 80 }
 81 
 82 inline bool stackChunkOopDesc::try_set_flags(uint8_t prev_flags, uint8_t new_flags) {
 83   return jdk_internal_vm_StackChunk::try_set_flags(this, prev_flags, new_flags);
 84 }
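// A minimal sketch of the retry pattern a caller of try_set_flags can use to
// set a flag atomically (hypothetical caller; "chunk" stands for some
// stackChunkOop):
//
//   uint8_t prev = chunk->flags();
//   while (!chunk->try_set_flags(prev, (uint8_t)(prev | stackChunkOopDesc::FLAG_GC_MODE))) {
//     prev = chunk->flags();
//   }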
 85 
 86 inline int stackChunkOopDesc::max_thawing_size() const          { return jdk_internal_vm_StackChunk::maxThawingSize(as_oop()); }
 87 inline void stackChunkOopDesc::set_max_thawing_size(int value)  {
 88   assert(value >= 0, "size must be >= 0");
 89   jdk_internal_vm_StackChunk::set_maxThawingSize(this, (jint)value);
 90 }
 91 
 92 inline uint8_t stackChunkOopDesc::lockstack_size() const         { return jdk_internal_vm_StackChunk::lockStackSize(as_oop()); }
 93 inline void stackChunkOopDesc::set_lockstack_size(uint8_t value) { jdk_internal_vm_StackChunk::set_lockStackSize(this, value); }
 94 
 95 inline ObjectWaiter* stackChunkOopDesc::object_waiter() const       { return (ObjectWaiter*)jdk_internal_vm_StackChunk::objectWaiter(as_oop()); }
 96 inline void stackChunkOopDesc::set_object_waiter(ObjectWaiter* obj) { jdk_internal_vm_StackChunk::set_objectWaiter(this, (address)obj); }
 97 
 98 inline oop stackChunkOopDesc::cont() const                {
 99   if (UseZGC && !ZGenerational) {
100     assert(!UseCompressedOops, "Non-generational ZGC does not support compressed oops");
101     // The state of the cont oop is used by XCollectedHeap::requires_barriers
102     // to determine the age of the stackChunkOopDesc. For that to work, only
103     // the GC is allowed to perform a load barrier on the oop itself.
104     // This function is used by non-GC code and therefore creates a stack-local
105     // copy of the oop and performs the load barrier on that copy instead.
106     oop obj = jdk_internal_vm_StackChunk::cont_raw<oop>(as_oop());
107     obj = (oop)NativeAccess<>::oop_load(&obj);
108     return obj;
109   }
110   return jdk_internal_vm_StackChunk::cont(as_oop());
111 }
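// Note for the non-generational ZGC path above: because the barrier is applied
// to the stack-local copy, the cont field itself keeps its original (colored)
// pointer and so still carries the age information that
// XCollectedHeap::requires_barriers inspects.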
112 inline void stackChunkOopDesc::set_cont(oop value)        { jdk_internal_vm_StackChunk::set_cont(this, value); }
113 template<typename P>
114 inline void stackChunkOopDesc::set_cont_raw(oop value)    { jdk_internal_vm_StackChunk::set_cont_raw<P>(this, value); }
115 template<DecoratorSet decorators>
116 inline void stackChunkOopDesc::set_cont_access(oop value) { jdk_internal_vm_StackChunk::set_cont_access<decorators>(this, value); }
117 

157   HeapWord* end = start + stack_size();
158   return (HeapWord*)p >= start && (HeapWord*)p < end;
159 }
160 
161 inline bool stackChunkOopDesc::is_flag(uint8_t flag) const {
162   return (flags() & flag) != 0;
163 }
164 inline bool stackChunkOopDesc::is_flag_acquire(uint8_t flag) const {
165   return (flags_acquire() & flag) != 0;
166 }
167 inline void stackChunkOopDesc::set_flag(uint8_t flag, bool value) {
168   uint32_t flags = this->flags();
169   set_flags((uint8_t)(value ? flags |= flag : flags &= ~flag));
170 }
171 inline void stackChunkOopDesc::clear_flags() {
172   set_flags(0);
173 }
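// For example, if flags() currently has only FLAG_HAS_INTERPRETED_FRAMES set,
// then set_flag(FLAG_GC_MODE, true) stores
// (FLAG_HAS_INTERPRETED_FRAMES | FLAG_GC_MODE), and a later
// set_flag(FLAG_GC_MODE, false) restores the original value. set_flag is a
// plain read-modify-write; racing updaters would need try_set_flags above.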
174 
175 inline bool stackChunkOopDesc::has_mixed_frames() const { return is_flag(FLAG_HAS_INTERPRETED_FRAMES); }
176 inline void stackChunkOopDesc::set_has_mixed_frames(bool value) {
177   assert((flags() & ~(FLAG_HAS_INTERPRETED_FRAMES | FLAG_PREEMPTED)) == 0, "other flags should not be set");
178   set_flag(FLAG_HAS_INTERPRETED_FRAMES, value);
179 }
180 
181 inline bool stackChunkOopDesc::preempted() const { return is_flag(FLAG_PREEMPTED); }
182 inline void stackChunkOopDesc::set_preempted(bool value) {
183   assert(preempted() != value, "");
184   set_flag(FLAG_PREEMPTED, value);
185 }
186 
187 inline ObjectMonitor* stackChunkOopDesc::current_pending_monitor() const {
188   ObjectWaiter* waiter = object_waiter();
189   if (waiter != nullptr && (waiter->is_monitorenter() || (waiter->is_wait() && (waiter->at_reenter() || waiter->notified())))) {
190     return waiter->monitor();
191   }
192   return nullptr;
193 }
194 
195 inline ObjectMonitor* stackChunkOopDesc::current_waiting_monitor() const {
196   ObjectWaiter* waiter = object_waiter();
197   return waiter != nullptr && waiter->is_wait() ? waiter->monitor() : nullptr;
198 }
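// Put differently: a thread blocked on MONITORENTER always reports a pending
// monitor; a thread in Object.wait() reports a waiting monitor, and reports it
// as pending as well once it has been notified (or is re-entering after a
// timeout) and must re-acquire the monitor before continuing.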
199 
200 inline bool stackChunkOopDesc::has_lockstack() const         { return is_flag(FLAG_HAS_LOCKSTACK); }
201 inline void stackChunkOopDesc::set_has_lockstack(bool value) { set_flag(FLAG_HAS_LOCKSTACK, value); }
202 
203 inline bool stackChunkOopDesc::is_gc_mode() const                  { return is_flag(FLAG_GC_MODE); }
204 inline bool stackChunkOopDesc::is_gc_mode_acquire() const          { return is_flag_acquire(FLAG_GC_MODE); }
205 inline void stackChunkOopDesc::set_gc_mode(bool value)             { set_flag(FLAG_GC_MODE, value); }
206 
207 inline bool stackChunkOopDesc::has_bitmap() const                  { return is_flag(FLAG_HAS_BITMAP); }
208 inline void stackChunkOopDesc::set_has_bitmap(bool value)          { set_flag(FLAG_HAS_BITMAP, value); }
209 
210 inline bool stackChunkOopDesc::has_thaw_slowpath_condition() const { return flags() != 0; }
211 
212 inline bool stackChunkOopDesc::requires_barriers() {
213   return Universe::heap()->requires_barriers(this);
214 }
215 
216 template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
217 void stackChunkOopDesc::do_barriers(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
218   if (frame_kind == ChunkFrames::Mixed) {
219     // we could freeze deopted frames in slow mode.
220     f.handle_deopted();
221   }
222   do_barriers0<barrier>(f, map);
223 }
224 
225 template <typename OopT, class StackChunkLockStackClosureType>
226 inline void stackChunkOopDesc::iterate_lockstack(StackChunkLockStackClosureType* closure) {
227   assert(LockingMode == LM_LIGHTWEIGHT, "");
228   int cnt = lockstack_size();
229   intptr_t* lockstart_addr = start_address();
230   for (int i = 0; i < cnt; i++) {
231     closure->do_oop((OopT*)&lockstart_addr[i]);
232   }
233 }
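// A matching closure only needs a do_oop(OopT*) member; OopT is presumably
// narrowOop or oop depending on UseCompressedOops, as in the bitmap code
// below. Sketch (hypothetical closure, for illustration):
//
//   struct PrintLockStackClosure {
//     template <typename OopT>
//     void do_oop(OopT* p) { /* visit the lock-stack slot at p */ }
//   };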
234 
235 template <class StackChunkFrameClosureType>
236 inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
237   has_mixed_frames() ? iterate_stack<ChunkFrames::Mixed>(closure)
238                      : iterate_stack<ChunkFrames::CompiledOnly>(closure);
239 }
240 
241 template <ChunkFrames frame_kind, class StackChunkFrameClosureType>
242 inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
243   const SmallRegisterMap* map = SmallRegisterMap::instance();
244   assert(!map->in_cont(), "");
245 
246   StackChunkFrameStream<frame_kind> f(this);
247   bool should_continue = true;
248 
249   if (f.is_stub()) {
250     RegisterMap full_map(nullptr,
251                          RegisterMap::UpdateMap::include,
252                          RegisterMap::ProcessFrames::skip,
253                          RegisterMap::WalkContinuation::include);
254     full_map.set_include_argument_oops(false);
255     closure->do_frame(f, map);
256 
257     f.next(&full_map);
258     assert(!f.is_done(), "");
259     assert(f.is_compiled(), "");
260 
261     should_continue = closure->do_frame(f, &full_map);
262     f.next(map);
263   }
264   assert(!f.is_stub(), "");
265 
266   for (; should_continue && !f.is_done(); f.next(map)) {
267     if (frame_kind == ChunkFrames::Mixed) {
268       // in slow mode we might freeze deoptimized frames
269       f.handle_deopted();
270     }
271     should_continue = closure->do_frame(f, map);
272   }
273 }
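// A minimal sketch of a frame closure as consumed by the loop above
// (hypothetical; real closures are defined at the call sites of
// iterate_stack):
//
//   class CountFramesClosure {
//    public:
//     int _count = 0;
//     template <ChunkFrames frame_kind, typename RegisterMapT>
//     bool do_frame(const StackChunkFrameStream<frame_kind>& f,
//                   const RegisterMapT* map) {
//       _count++;      // inspect f and map here
//       return true;   // returning false stops the walk early
//     }
//   };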
274 
275 inline frame stackChunkOopDesc::relativize(frame fr)   const { relativize_frame(fr);   return fr; }
276 inline frame stackChunkOopDesc::derelativize(frame fr) const { derelativize_frame(fr); return fr; }
277 
278 inline void* stackChunkOopDesc::gc_data() const {
279   int stack_sz = stack_size();
280   assert(stack_sz != 0, "stack should not be empty");
281 
282   // The gc data is located after the stack.

303 inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
304   assert(is_aligned(p, alignof(OopT)), "should be aligned: " PTR_FORMAT, p2i(p));
305   assert(p >= (OopT*)start_address(), "Address not in chunk");
306   return p - (OopT*)start_address();
307 }
308 
309 inline intptr_t* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
310   return UseCompressedOops ? (intptr_t*)address_for_bit<narrowOop>(index) : (intptr_t*)address_for_bit<oop>(index);
311 }
312 
313 template <typename OopT>
314 inline OopT* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
315   return (OopT*)start_address() + index;
316 }
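// bit_index_for and address_for_bit are inverses: for any suitably aligned
// in-chunk slot p, address_for_bit<OopT>(bit_index_for(p)) == p. The bitmap
// index is simply the slot's distance from start_address() in OopT-sized
// units.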
317 
318 inline MemRegion stackChunkOopDesc::range() {
319   return MemRegion((HeapWord*)this, size());
320 }
321 
322 inline int stackChunkOopDesc::relativize_usp_offset(const frame& fr, const int usp_offset_in_bytes) const {
323   assert(fr.is_compiled_frame() || fr.cb()->is_runtime_stub(), "");
324   assert(is_in_chunk(fr.unextended_sp()), "");
325 
326   intptr_t* base = fr.real_fp(); // equal to the caller's sp
327   intptr_t* loc = (intptr_t*)((address)fr.unextended_sp() + usp_offset_in_bytes);
328   assert(base > loc, "");
329   return (int)(base - loc);
330 }
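// Worked example (illustrative addresses, 64-bit): with fr.real_fp() ==
// 0x7f0010 and fr.unextended_sp() + usp_offset_in_bytes == 0x7efff0, the
// pointer difference is 0x20 bytes, so the function returns 4, i.e. the
// location lies 4 words below the caller's sp.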
331 
332 inline address stackChunkOopDesc::usp_offset_to_location(const frame& fr, const int usp_offset_in_bytes) const {
333   assert(fr.is_compiled_frame(), "");
334   return (address)derelativize_address(fr.offset_unextended_sp()) + usp_offset_in_bytes;
335 }
336 
337 inline address stackChunkOopDesc::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg) const {
338   assert(fr.is_compiled_frame(), "");
339   assert(map != nullptr, "");
340   assert(map->stack_chunk() == as_oop(), "");
341 
342   // the offsets are saved in the map after going through relativize_usp_offset, so they are sp - loc, in words
343   intptr_t offset = (intptr_t)map->location(reg, nullptr); // see usp_offset_to_index for the chunk case