22 *
23 */
24
25 #ifndef SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
26 #define SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
27
28 #include "oops/stackChunkOop.hpp"
29
30 #include "gc/shared/collectedHeap.hpp"
31 #include "gc/shared/barrierSet.hpp"
32 #include "gc/shared/barrierSetStackChunk.hpp"
33 #include "gc/shared/gc_globals.hpp"
34 #include "memory/memRegion.hpp"
35 #include "memory/universe.hpp"
36 #include "oops/access.inline.hpp"
37 #include "oops/instanceStackChunkKlass.inline.hpp"
38 #include "runtime/continuationJavaClasses.inline.hpp"
39 #include "runtime/frame.inline.hpp"
40 #include "runtime/globals.hpp"
41 #include "runtime/handles.inline.hpp"
42 #include "runtime/registerMap.hpp"
43 #include "runtime/smallRegisterMap.inline.hpp"
44 #include "utilities/macros.hpp"
45 #include CPU_HEADER_INLINE(stackChunkOop)
46
47 DEF_HANDLE_CONSTR(stackChunk, is_stackChunk_noinline)
48
// Checked cast from a generic oop: in debug builds verifies that the object
// really is a stack chunk (or null) before reinterpreting the pointer.
inline stackChunkOop stackChunkOopDesc::cast(oop obj) {
  assert(obj == nullptr || obj->is_stackChunk(), "Wrong type");
  return stackChunkOop(obj);
}

// Accessors for the jdk.internal.vm.StackChunk 'parent' field: the chunk
// holding the frames below this one in the continuation's stack (or null).
inline stackChunkOop stackChunkOopDesc::parent() const { return stackChunkOopDesc::cast(jdk_internal_vm_StackChunk::parent(as_oop())); }
inline void stackChunkOopDesc::set_parent(stackChunkOop value) { jdk_internal_vm_StackChunk::set_parent(this, value); }
// Raw store; P selects the in-heap oop representation (narrowOop vs oop).
template<typename P>
inline void stackChunkOopDesc::set_parent_raw(oop value) { jdk_internal_vm_StackChunk::set_parent_raw<P>(this, value); }
// Store with an explicit access decorator set, for GC-specific code paths.
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_parent_access(oop value) { jdk_internal_vm_StackChunk::set_parent_access<decorators>(this, value); }

// Size of the chunk's stack area as recorded in the Java-level 'size' field.
inline int stackChunkOopDesc::stack_size() const { return jdk_internal_vm_StackChunk::size(as_oop()); }

// Flag-byte accessors; bit meanings are the FLAG_* constants used below.
inline uint8_t stackChunkOopDesc::flags() const { return jdk_internal_vm_StackChunk::flags(as_oop()); }
inline void stackChunkOopDesc::set_flags(uint8_t value) { jdk_internal_vm_StackChunk::set_flags(this, value); }

// Load-acquire variant; pairs with release_set_flags below.
inline uint8_t stackChunkOopDesc::flags_acquire() const { return jdk_internal_vm_StackChunk::flags_acquire(as_oop()); }

inline void stackChunkOopDesc::release_set_flags(uint8_t value) {
  jdk_internal_vm_StackChunk::release_set_flags(this, value);
}

// Atomically replaces the flag byte; returns true iff the current value
// still equalled prev_flags.
inline bool stackChunkOopDesc::try_set_flags(uint8_t prev_flags, uint8_t new_flags) {
  return jdk_internal_vm_StackChunk::try_set_flags(this, prev_flags, new_flags);
}

// Upper bound on the stack space needed to thaw this chunk; maintained by
// freeze/thaw code, never negative.
inline int stackChunkOopDesc::max_thawing_size() const { return jdk_internal_vm_StackChunk::maxThawingSize(as_oop()); }
inline void stackChunkOopDesc::set_max_thawing_size(int value) {
  assert(value >= 0, "size must be >= 0");
  jdk_internal_vm_StackChunk::set_maxThawingSize(this, (jint)value);
}
90
// Returns the Continuation object this chunk belongs to.
inline oop stackChunkOopDesc::cont() const {
  if (UseZGC && !ZGenerational) {
    assert(!UseCompressedOops, "Non-generational ZGC does not support compressed oops");
    // The state of the cont oop is used by XCollectedHeap::requires_barriers,
    // to determine the age of the stackChunkOopDesc. For that to work, it is
    // only the GC that is allowed to perform a load barrier on the oop.
    // This function is used by non-GC code and therefore creates a stack-local
    // copy of the oop and performs the load barrier on that copy instead.
    oop obj = jdk_internal_vm_StackChunk::cont_raw<oop>(as_oop());
    obj = (oop)NativeAccess<>::oop_load(&obj);
    return obj;
  }
  return jdk_internal_vm_StackChunk::cont(as_oop());
}
inline void stackChunkOopDesc::set_cont(oop value) { jdk_internal_vm_StackChunk::set_cont(this, value); }
// Raw store; P selects the in-heap oop representation (narrowOop vs oop).
template<typename P>
inline void stackChunkOopDesc::set_cont_raw(oop value) { jdk_internal_vm_StackChunk::set_cont_raw<P>(this, value); }
// Store with an explicit access decorator set, for GC-specific code paths.
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_cont_access(oop value) { jdk_internal_vm_StackChunk::set_cont_access<decorators>(this, value); }
110
144 HeapWord* end = start + stack_size();
145 return (HeapWord*)p >= start && (HeapWord*)p < end;
146 }
147
148 bool stackChunkOopDesc::is_usable_in_chunk(void* p) const {
149 HeapWord* start = (HeapWord*)start_address() + sp() - frame::metadata_words_at_bottom;
150 HeapWord* end = start + stack_size();
151 return (HeapWord*)p >= start && (HeapWord*)p < end;
152 }
153
154 inline bool stackChunkOopDesc::is_flag(uint8_t flag) const {
155 return (flags() & flag) != 0;
156 }
157 inline bool stackChunkOopDesc::is_flag_acquire(uint8_t flag) const {
158 return (flags_acquire() & flag) != 0;
159 }
160 inline void stackChunkOopDesc::set_flag(uint8_t flag, bool value) {
161 uint32_t flags = this->flags();
162 set_flags((uint8_t)(value ? flags |= flag : flags &= ~flag));
163 }
164 inline void stackChunkOopDesc::clear_flags() {
165 set_flags(0);
166 }
167
// True if the chunk contains interpreted frames (the slow, "mixed" case).
inline bool stackChunkOopDesc::has_mixed_frames() const { return is_flag(FLAG_HAS_INTERPRETED_FRAMES); }
inline void stackChunkOopDesc::set_has_mixed_frames(bool value) {
  // At this point no other flag may have been set yet.
  assert((flags() & ~FLAG_HAS_INTERPRETED_FRAMES) == 0, "other flags should not be set");
  set_flag(FLAG_HAS_INTERPRETED_FRAMES, value);
}

// GC-mode flag; exact semantics are defined where FLAG_GC_MODE is set.
inline bool stackChunkOopDesc::is_gc_mode() const { return is_flag(FLAG_GC_MODE); }
inline bool stackChunkOopDesc::is_gc_mode_acquire() const { return is_flag_acquire(FLAG_GC_MODE); }
inline void stackChunkOopDesc::set_gc_mode(bool value) { set_flag(FLAG_GC_MODE, value); }

// Whether an oop bitmap has been built for the chunk's stack contents.
inline bool stackChunkOopDesc::has_bitmap() const { return is_flag(FLAG_HAS_BITMAP); }
inline void stackChunkOopDesc::set_has_bitmap(bool value) { set_flag(FLAG_HAS_BITMAP, value); }

// Any flag being set forces thaw to take the slow path.
inline bool stackChunkOopDesc::has_thaw_slowpath_condition() const { return flags() != 0; }

// Asks the heap whether stores into this chunk require GC barriers
// (see the cont() comment above: the heap uses chunk state to decide).
inline bool stackChunkOopDesc::requires_barriers() {
  return Universe::heap()->requires_barriers(this);
}
186
// Applies GC barriers of the given BarrierType to the frame at 'f'.
// In Mixed (slow-path) walks a frozen frame may have been deoptimized,
// so let the stream handle that first before delegating to do_barriers0.
template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
void stackChunkOopDesc::do_barriers(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
  if (frame_kind == ChunkFrames::Mixed) {
    // we could freeze deopted frames in slow mode.
    f.handle_deopted();
  }
  do_barriers0<barrier>(f, map);
}
195
196 template <class StackChunkFrameClosureType>
197 inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
198 has_mixed_frames() ? iterate_stack<ChunkFrames::Mixed>(closure)
199 : iterate_stack<ChunkFrames::CompiledOnly>(closure);
200 }
201
// Frame-kind-specialized walk. Ordinary frames use the lightweight
// SmallRegisterMap; a stub frame at the top requires a full RegisterMap so
// that its compiled caller's register state can be resolved.
template <ChunkFrames frame_kind, class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  const SmallRegisterMap* map = SmallRegisterMap::instance;
  assert(!map->in_cont(), "");

  StackChunkFrameStream<frame_kind> f(this);
  bool should_continue = true;

  if (f.is_stub()) {
    // Full map (updated, continuation-walking) for the stub's caller.
    RegisterMap full_map(nullptr,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::skip,
                         RegisterMap::WalkContinuation::include);
    full_map.set_include_argument_oops(false);

    f.next(&full_map);

    // A stub never sits alone at the top; its compiled caller must follow.
    assert(!f.is_done(), "");
    assert(f.is_compiled(), "");

    should_continue = closure->do_frame(f, &full_map);
    f.next(map);
    f.handle_deopted(); // the stub caller might be deoptimized (as it's not at a call)
  }
  assert(!f.is_stub(), "");

  // Walk the remaining frames until the closure stops us or we run out.
  for(; should_continue && !f.is_done(); f.next(map)) {
    if (frame_kind == ChunkFrames::Mixed) {
      // in slow mode we might freeze deoptimized frames
      f.handle_deopted();
    }
    should_continue = closure->do_frame(f, map);
  }
}
236
// Return a copy of 'fr' with its internal pointers converted to
// chunk-relative form (relativize) or back to absolute form (derelativize).
inline frame stackChunkOopDesc::relativize(frame fr) const { relativize_frame(fr); return fr; }
inline frame stackChunkOopDesc::derelativize(frame fr) const { derelativize_frame(fr); return fr; }
239
240 inline void* stackChunkOopDesc::gc_data() const {
241 int stack_sz = stack_size();
242 assert(stack_sz != 0, "stack should not be empty");
243
244 // The gc data is located after the stack.
265 inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
266 assert(is_aligned(p, alignof(OopT)), "should be aligned: " PTR_FORMAT, p2i(p));
267 assert(p >= (OopT*)start_address(), "Address not in chunk");
268 return p - (OopT*)start_address();
269 }
270
// Maps a bitmap bit index back to the stack slot it covers, using the oop
// width (narrowOop vs oop) matching the current compressed-oops mode.
inline intptr_t* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return UseCompressedOops ? (intptr_t*)address_for_bit<narrowOop>(index) : (intptr_t*)address_for_bit<oop>(index);
}

// Typed variant: bit 'index' covers the index-th OopT slot from the start
// of the stack area (inverse of bit_index_for).
template <typename OopT>
inline OopT* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return (OopT*)start_address() + index;
}

// The heap region occupied by this chunk object: [this, this + size()).
inline MemRegion stackChunkOopDesc::range() {
  return MemRegion((HeapWord*)this, size());
}
283
// Converts a byte offset from a frame's unextended sp into a relative
// offset in words measured down from the frame's real fp (the caller's sp),
// suitable for storing while the chunk's addresses may move.
inline int stackChunkOopDesc::relativize_usp_offset(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame() || fr.cb()->is_safepoint_stub(), "");
  assert(is_in_chunk(fr.unextended_sp()), "");

  intptr_t* base = fr.real_fp(); // equal to the caller's sp
  intptr_t* loc = (intptr_t*)((address)fr.unextended_sp() + usp_offset_in_bytes);
  assert(base > loc, "");
  return (int)(base - loc);
}

// Resolves a byte offset from the frame's unextended sp to an absolute
// address, derelativizing the sp against this chunk first.
inline address stackChunkOopDesc::usp_offset_to_location(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame(), "");
  return (address)derelativize_address(fr.offset_unextended_sp()) + usp_offset_in_bytes;
}
298
299 inline address stackChunkOopDesc::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg) const {
300 assert(fr.is_compiled_frame(), "");
301 assert(map != nullptr, "");
302 assert(map->stack_chunk() == as_oop(), "");
303
304 // the offsets are saved in the map after going through relativize_usp_offset, so they are sp - loc, in words
305 intptr_t offset = (intptr_t)map->location(reg, nullptr); // see usp_offset_to_index for the chunk case
|
22 *
23 */
24
25 #ifndef SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
26 #define SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
27
28 #include "oops/stackChunkOop.hpp"
29
30 #include "gc/shared/collectedHeap.hpp"
31 #include "gc/shared/barrierSet.hpp"
32 #include "gc/shared/barrierSetStackChunk.hpp"
33 #include "gc/shared/gc_globals.hpp"
34 #include "memory/memRegion.hpp"
35 #include "memory/universe.hpp"
36 #include "oops/access.inline.hpp"
37 #include "oops/instanceStackChunkKlass.inline.hpp"
38 #include "runtime/continuationJavaClasses.inline.hpp"
39 #include "runtime/frame.inline.hpp"
40 #include "runtime/globals.hpp"
41 #include "runtime/handles.inline.hpp"
42 #include "runtime/objectMonitor.hpp"
43 #include "runtime/registerMap.hpp"
44 #include "runtime/smallRegisterMap.inline.hpp"
45 #include "utilities/macros.hpp"
46 #include CPU_HEADER_INLINE(stackChunkOop)
47
48 DEF_HANDLE_CONSTR(stackChunk, is_stackChunk_noinline)
49
// Checked cast from a generic oop: in debug builds verifies that the object
// really is a stack chunk (or null) before reinterpreting the pointer.
inline stackChunkOop stackChunkOopDesc::cast(oop obj) {
  assert(obj == nullptr || obj->is_stackChunk(), "Wrong type");
  return stackChunkOop(obj);
}

// Accessors for the jdk.internal.vm.StackChunk 'parent' field: the chunk
// holding the frames below this one in the continuation's stack (or null).
inline stackChunkOop stackChunkOopDesc::parent() const { return stackChunkOopDesc::cast(jdk_internal_vm_StackChunk::parent(as_oop())); }
inline void stackChunkOopDesc::set_parent(stackChunkOop value) { jdk_internal_vm_StackChunk::set_parent(this, value); }
// Raw store; P selects the in-heap oop representation (narrowOop vs oop).
template<typename P>
inline void stackChunkOopDesc::set_parent_raw(oop value) { jdk_internal_vm_StackChunk::set_parent_raw<P>(this, value); }
// Store with an explicit access decorator set, for GC-specific code paths.
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_parent_access(oop value) { jdk_internal_vm_StackChunk::set_parent_access<decorators>(this, value); }

// Size of the chunk's stack area as recorded in the Java-level 'size' field.
inline int stackChunkOopDesc::stack_size() const { return jdk_internal_vm_StackChunk::size(as_oop()); }

// Flag-byte accessors; bit meanings are the FLAG_* constants used below.
inline uint8_t stackChunkOopDesc::flags() const { return jdk_internal_vm_StackChunk::flags(as_oop()); }
inline void stackChunkOopDesc::set_flags(uint8_t value) { jdk_internal_vm_StackChunk::set_flags(this, value); }

// Load-acquire variant; pairs with release_set_flags below.
inline uint8_t stackChunkOopDesc::flags_acquire() const { return jdk_internal_vm_StackChunk::flags_acquire(as_oop()); }

inline void stackChunkOopDesc::release_set_flags(uint8_t value) {
  jdk_internal_vm_StackChunk::release_set_flags(this, value);
}

// Atomically replaces the flag byte; returns true iff the current value
// still equalled prev_flags.
inline bool stackChunkOopDesc::try_set_flags(uint8_t prev_flags, uint8_t new_flags) {
  return jdk_internal_vm_StackChunk::try_set_flags(this, prev_flags, new_flags);
}

// Upper bound on the stack space needed to thaw this chunk; maintained by
// freeze/thaw code, never negative.
inline int stackChunkOopDesc::max_thawing_size() const { return jdk_internal_vm_StackChunk::maxThawingSize(as_oop()); }
inline void stackChunkOopDesc::set_max_thawing_size(int value) {
  assert(value >= 0, "size must be >= 0");
  jdk_internal_vm_StackChunk::set_maxThawingSize(this, (jint)value);
}

// Number of lock-stack oops stored with this chunk; see iterate_lockstack,
// which reads them from the base of the stack area (lightweight locking).
inline uint8_t stackChunkOopDesc::lockstack_size() const { return jdk_internal_vm_StackChunk::lockStackSize(as_oop()); }
inline void stackChunkOopDesc::set_lockstack_size(uint8_t value) { jdk_internal_vm_StackChunk::set_lockStackSize(this, value); }

// ObjectWaiter for a preempted monitor operation, stored in the Java object
// as a raw address; null when there is none.
inline ObjectWaiter* stackChunkOopDesc::object_waiter() const { return (ObjectWaiter*)jdk_internal_vm_StackChunk::objectWaiter(as_oop()); }
inline void stackChunkOopDesc::set_object_waiter(ObjectWaiter* obj) { jdk_internal_vm_StackChunk::set_objectWaiter(this, (address)obj); }
97
// Returns the Continuation object this chunk belongs to.
inline oop stackChunkOopDesc::cont() const {
  if (UseZGC && !ZGenerational) {
    assert(!UseCompressedOops, "Non-generational ZGC does not support compressed oops");
    // The state of the cont oop is used by XCollectedHeap::requires_barriers,
    // to determine the age of the stackChunkOopDesc. For that to work, it is
    // only the GC that is allowed to perform a load barrier on the oop.
    // This function is used by non-GC code and therefore creates a stack-local
    // copy of the oop and performs the load barrier on that copy instead.
    oop obj = jdk_internal_vm_StackChunk::cont_raw<oop>(as_oop());
    obj = (oop)NativeAccess<>::oop_load(&obj);
    return obj;
  }
  return jdk_internal_vm_StackChunk::cont(as_oop());
}
inline void stackChunkOopDesc::set_cont(oop value) { jdk_internal_vm_StackChunk::set_cont(this, value); }
// Raw store; P selects the in-heap oop representation (narrowOop vs oop).
template<typename P>
inline void stackChunkOopDesc::set_cont_raw(oop value) { jdk_internal_vm_StackChunk::set_cont_raw<P>(this, value); }
// Store with an explicit access decorator set, for GC-specific code paths.
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_cont_access(oop value) { jdk_internal_vm_StackChunk::set_cont_access<decorators>(this, value); }
117
151 HeapWord* end = start + stack_size();
152 return (HeapWord*)p >= start && (HeapWord*)p < end;
153 }
154
155 bool stackChunkOopDesc::is_usable_in_chunk(void* p) const {
156 HeapWord* start = (HeapWord*)start_address() + sp() - frame::metadata_words_at_bottom;
157 HeapWord* end = start + stack_size();
158 return (HeapWord*)p >= start && (HeapWord*)p < end;
159 }
160
// Flag-byte helpers; 'flag' is one of the FLAG_* bit masks.
inline bool stackChunkOopDesc::is_flag(uint8_t flag) const {
  return (flags() & flag) != 0;
}
inline bool stackChunkOopDesc::is_flag_acquire(uint8_t flag) const {
  return (flags_acquire() & flag) != 0;
}
// Sets or clears a single flag bit, leaving the others untouched.
inline void stackChunkOopDesc::set_flag(uint8_t flag, bool value) {
  uint32_t flags = this->flags();
  set_flags((uint8_t)(value ? flags |= flag : flags &= ~flag));
}
// Clears only the bits in 'flag', leaving other flags untouched.
inline void stackChunkOopDesc::clear_flags(uint8_t flag) {
  uint32_t flags = this->flags();
  set_flags((uint8_t)(flags &= ~flag));
}
// Clears every flag bit.
inline void stackChunkOopDesc::clear_flags() {
  set_flags(0);
}
178
// True if the chunk contains interpreted frames (the slow, "mixed" case).
inline bool stackChunkOopDesc::has_mixed_frames() const { return is_flag(FLAG_HAS_INTERPRETED_FRAMES); }
inline void stackChunkOopDesc::set_has_mixed_frames(bool value) {
  // Only the preemption flags may already be set at this point.
  assert((flags() & ~(FLAG_HAS_INTERPRETED_FRAMES | FLAGS_PREEMPTED)) == 0, "other flags should not be set");
  set_flag(FLAG_HAS_INTERPRETED_FRAMES, value);
}

// Records why the continuation was preempted (monitorenter vs Object.wait)
// by setting exactly one of the FLAGS_PREEMPTED bits; must not already be set.
inline void stackChunkOopDesc::set_preempt_kind(int freeze_kind) {
  assert((flags() & FLAGS_PREEMPTED) == 0, "");
  assert(freeze_kind == freeze_on_monitorenter || freeze_kind == freeze_on_wait, "");
  uint8_t flag = freeze_kind == freeze_on_monitorenter ? FLAG_PREEMPTED_MONITORENTER : FLAG_PREEMPTED_WAIT;
  set_flag(flag, true);
}

// Reads the recorded preempt kind and clears both preemption bits.
// Precondition: exactly one of the two bits is set.
inline int stackChunkOopDesc::get_and_clear_preempt_kind() {
  assert((is_flag(FLAG_PREEMPTED_MONITORENTER) && !is_flag(FLAG_PREEMPTED_WAIT))
       || (is_flag(FLAG_PREEMPTED_WAIT) && !is_flag(FLAG_PREEMPTED_MONITORENTER)), "");
  int kind = is_flag(FLAG_PREEMPTED_MONITORENTER) ? freeze_on_monitorenter : freeze_on_wait;
  clear_flags(FLAGS_PREEMPTED);
  return kind;
}
199
// The monitor this (preempted) chunk is blocked trying to enter, if any:
// either a plain monitorenter, or a wait() that was notified / is re-entering.
inline ObjectMonitor* stackChunkOopDesc::current_pending_monitor() const {
  ObjectWaiter* waiter = object_waiter();
  if (waiter != nullptr && (waiter->is_monitorenter() || (waiter->is_wait() && (waiter->at_reenter() || waiter->notified())))) {
    return waiter->monitor();
  }
  return nullptr;
}

// The monitor this chunk is waiting on via Object.wait(), if any.
inline ObjectMonitor* stackChunkOopDesc::current_waiting_monitor() const {
  ObjectWaiter* waiter = object_waiter();
  return waiter != nullptr && waiter->is_wait() ? waiter->monitor() : nullptr;
}
212
// Whether the frozen chunk carries lock-stack oops (see iterate_lockstack).
inline bool stackChunkOopDesc::has_lockstack() const { return is_flag(FLAG_HAS_LOCKSTACK); }
inline void stackChunkOopDesc::set_has_lockstack(bool value) { set_flag(FLAG_HAS_LOCKSTACK, value); }

// GC-mode flag; exact semantics are defined where FLAG_GC_MODE is set.
inline bool stackChunkOopDesc::is_gc_mode() const { return is_flag(FLAG_GC_MODE); }
inline bool stackChunkOopDesc::is_gc_mode_acquire() const { return is_flag_acquire(FLAG_GC_MODE); }
inline void stackChunkOopDesc::set_gc_mode(bool value) { set_flag(FLAG_GC_MODE, value); }

// Whether an oop bitmap has been built for the chunk's stack contents.
inline bool stackChunkOopDesc::has_bitmap() const { return is_flag(FLAG_HAS_BITMAP); }
inline void stackChunkOopDesc::set_has_bitmap(bool value) { set_flag(FLAG_HAS_BITMAP, value); }

// Any flag being set forces thaw to take the slow path.
inline bool stackChunkOopDesc::has_thaw_slowpath_condition() const { return flags() != 0; }

// Asks the heap whether stores into this chunk require GC barriers
// (see the cont() comment above: the heap uses chunk state to decide).
inline bool stackChunkOopDesc::requires_barriers() {
  return Universe::heap()->requires_barriers(this);
}
228
// Applies GC barriers of the given BarrierType to the frame at 'f'.
// In Mixed (slow-path) walks a frozen frame may have been deoptimized,
// so let the stream handle that first before delegating to do_barriers0.
template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
void stackChunkOopDesc::do_barriers(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
  if (frame_kind == ChunkFrames::Mixed) {
    // we could freeze deopted frames in slow mode.
    f.handle_deopted();
  }
  do_barriers0<barrier>(f, map);
}
237
// Visits the lock-stack oop slots stored at the base of the chunk's stack
// area; a no-op unless lightweight locking is in use. OopT must match the
// representation (narrowOop vs oop) the slots were written with.
template <typename OopT, class StackChunkLockStackClosureType>
inline void stackChunkOopDesc::iterate_lockstack(StackChunkLockStackClosureType* closure) {
  if (LockingMode != LM_LIGHTWEIGHT) {
    return;
  }
  int cnt = lockstack_size();
  intptr_t* lockstart_addr = start_address();
  for (int i = 0; i < cnt; i++) {
    closure->do_oop((OopT*)&lockstart_addr[i]);
  }
}
249
250 template <class StackChunkFrameClosureType>
251 inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
252 has_mixed_frames() ? iterate_stack<ChunkFrames::Mixed>(closure)
253 : iterate_stack<ChunkFrames::CompiledOnly>(closure);
254 }
255
256 template <ChunkFrames frame_kind, class StackChunkFrameClosureType>
257 inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
258 const SmallRegisterMap* map = SmallRegisterMap::instance;
259 assert(!map->in_cont(), "");
260
261 StackChunkFrameStream<frame_kind> f(this);
262 bool should_continue = true;
263
264 if (f.is_stub()) {
265 RegisterMap full_map(nullptr,
266 RegisterMap::UpdateMap::include,
267 RegisterMap::ProcessFrames::skip,
268 RegisterMap::WalkContinuation::include);
269 full_map.set_include_argument_oops(false);
270 closure->do_frame(f, map);
271
272 f.next(&full_map);
273 if (f.is_done()) return;
274
275 should_continue = closure->do_frame(f, &full_map);
276 f.next(&map);
277 }
278 assert(!f.is_stub(), "");
279
280 for(; should_continue && !f.is_done(); f.next(map)) {
281 if (frame_kind == ChunkFrames::Mixed) {
282 // in slow mode we might freeze deoptimized frames
283 f.handle_deopted();
284 }
285 should_continue = closure->do_frame(f, map);
286 }
287 }
288
// Return a copy of 'fr' with its internal pointers converted to
// chunk-relative form (relativize) or back to absolute form (derelativize).
inline frame stackChunkOopDesc::relativize(frame fr) const { relativize_frame(fr); return fr; }
inline frame stackChunkOopDesc::derelativize(frame fr) const { derelativize_frame(fr); return fr; }
291
292 inline void* stackChunkOopDesc::gc_data() const {
293 int stack_sz = stack_size();
294 assert(stack_sz != 0, "stack should not be empty");
295
296 // The gc data is located after the stack.
317 inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
318 assert(is_aligned(p, alignof(OopT)), "should be aligned: " PTR_FORMAT, p2i(p));
319 assert(p >= (OopT*)start_address(), "Address not in chunk");
320 return p - (OopT*)start_address();
321 }
322
// Maps a bitmap bit index back to the stack slot it covers, using the oop
// width (narrowOop vs oop) matching the current compressed-oops mode.
inline intptr_t* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return UseCompressedOops ? (intptr_t*)address_for_bit<narrowOop>(index) : (intptr_t*)address_for_bit<oop>(index);
}

// Typed variant: bit 'index' covers the index-th OopT slot from the start
// of the stack area (inverse of bit_index_for).
template <typename OopT>
inline OopT* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return (OopT*)start_address() + index;
}

// The heap region occupied by this chunk object: [this, this + size()).
inline MemRegion stackChunkOopDesc::range() {
  return MemRegion((HeapWord*)this, size());
}
335
// Converts a byte offset from a frame's unextended sp into a relative
// offset in words measured down from the frame's real fp (the caller's sp),
// suitable for storing while the chunk's addresses may move.
inline int stackChunkOopDesc::relativize_usp_offset(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame() || fr.cb()->is_runtime_stub(), "");
  assert(is_in_chunk(fr.unextended_sp()), "");

  intptr_t* base = fr.real_fp(); // equal to the caller's sp
  intptr_t* loc = (intptr_t*)((address)fr.unextended_sp() + usp_offset_in_bytes);
  assert(base > loc, "");
  return (int)(base - loc);
}

// Resolves a byte offset from the frame's unextended sp to an absolute
// address, derelativizing the sp against this chunk first.
inline address stackChunkOopDesc::usp_offset_to_location(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame(), "");
  return (address)derelativize_address(fr.offset_unextended_sp()) + usp_offset_in_bytes;
}
350
351 inline address stackChunkOopDesc::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg) const {
352 assert(fr.is_compiled_frame(), "");
353 assert(map != nullptr, "");
354 assert(map->stack_chunk() == as_oop(), "");
355
356 // the offsets are saved in the map after going through relativize_usp_offset, so they are sp - loc, in words
357 intptr_t offset = (intptr_t)map->location(reg, nullptr); // see usp_offset_to_index for the chunk case
|