/*
 * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
#define SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP

#include "oops/stackChunkOop.hpp"

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetStackChunk.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "memory/memRegion.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/instanceStackChunkKlass.inline.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER_INLINE(stackChunkOop)

DEF_HANDLE_CONSTR(stackChunk, is_stackChunk_noinline)

inline stackChunkOop stackChunkOopDesc::cast(oop obj) {
  assert(obj == nullptr || obj->is_stackChunk(), "Wrong type");
  return stackChunkOop(obj);
}

inline stackChunkOop stackChunkOopDesc::parent() const         { return stackChunkOopDesc::cast(jdk_internal_vm_StackChunk::parent(as_oop())); }
inline void stackChunkOopDesc::set_parent(stackChunkOop value) { jdk_internal_vm_StackChunk::set_parent(this, value); }
template<typename P>
inline void stackChunkOopDesc::set_parent_raw(oop value)       { jdk_internal_vm_StackChunk::set_parent_raw<P>(this, value); }
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_parent_access(oop value)    { jdk_internal_vm_StackChunk::set_parent_access<decorators>(this, value); }

inline int stackChunkOopDesc::stack_size() const        { return jdk_internal_vm_StackChunk::size(as_oop()); }

inline int stackChunkOopDesc::bottom() const            { return jdk_internal_vm_StackChunk::bottom(as_oop()); }
inline void stackChunkOopDesc::set_bottom(int value)    { jdk_internal_vm_StackChunk::set_bottom(this, value); }

inline int stackChunkOopDesc::sp() const                { return jdk_internal_vm_StackChunk::sp(as_oop()); }
inline void stackChunkOopDesc::set_sp(int value)        { jdk_internal_vm_StackChunk::set_sp(this, value); }

inline address stackChunkOopDesc::pc() const            { return jdk_internal_vm_StackChunk::pc(as_oop()); }
inline void stackChunkOopDesc::set_pc(address value)    { jdk_internal_vm_StackChunk::set_pc(this, value); }

inline uint8_t stackChunkOopDesc::flags() const         { return jdk_internal_vm_StackChunk::flags(as_oop()); }
inline void stackChunkOopDesc::set_flags(uint8_t value) { jdk_internal_vm_StackChunk::set_flags(this, value); }

inline uint8_t stackChunkOopDesc::flags_acquire() const { return jdk_internal_vm_StackChunk::flags_acquire(as_oop()); }

inline void stackChunkOopDesc::release_set_flags(uint8_t value) {
  jdk_internal_vm_StackChunk::release_set_flags(this, value);
}

inline bool stackChunkOopDesc::try_set_flags(uint8_t prev_flags, uint8_t new_flags) {
  return jdk_internal_vm_StackChunk::try_set_flags(this, prev_flags, new_flags);
}

inline int stackChunkOopDesc::max_thawing_size() const          { return jdk_internal_vm_StackChunk::maxThawingSize(as_oop()); }
inline void stackChunkOopDesc::set_max_thawing_size(int value)  {
  assert(value >= 0, "size must be >= 0");
  jdk_internal_vm_StackChunk::set_maxThawingSize(this, (jint)value);
}

inline uint8_t stackChunkOopDesc::lockstack_size() const         { return jdk_internal_vm_StackChunk::lockStackSize(as_oop()); }
inline void stackChunkOopDesc::set_lockstack_size(uint8_t value) { jdk_internal_vm_StackChunk::set_lockStackSize(this, value); }

inline ObjectWaiter* stackChunkOopDesc::object_waiter() const       { return (ObjectWaiter*)jdk_internal_vm_StackChunk::objectWaiter(as_oop()); }
inline void stackChunkOopDesc::set_object_waiter(ObjectWaiter* obj) { jdk_internal_vm_StackChunk::set_objectWaiter(this, (address)obj); }

inline oop stackChunkOopDesc::cont() const                {
  if (UseZGC && !ZGenerational) {
    assert(!UseCompressedOops, "Non-generational ZGC does not support compressed oops");
    // The state of the cont oop is used by XCollectedHeap::requires_barriers
    // to determine the age of the stackChunkOopDesc. For that to work, only
    // the GC is allowed to perform a load barrier on the oop itself.
    // This function is used by non-GC code, so it creates a stack-local
    // copy of the oop and performs the load barrier on that copy instead.
    oop obj = jdk_internal_vm_StackChunk::cont_raw<oop>(as_oop());
    obj = (oop)NativeAccess<>::oop_load(&obj);
    return obj;
  }
  return jdk_internal_vm_StackChunk::cont(as_oop());
}
inline void stackChunkOopDesc::set_cont(oop value)        { jdk_internal_vm_StackChunk::set_cont(this, value); }
template<typename P>
inline void stackChunkOopDesc::set_cont_raw(oop value)    { jdk_internal_vm_StackChunk::set_cont_raw<P>(this, value); }
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_cont_access(oop value) { jdk_internal_vm_StackChunk::set_cont_access<decorators>(this, value); }

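// Size, in words, of the stack-argument area at the bottom of the chunk, i.e. the
// stack arguments the chunk's bottom frame shares with its caller. Derived from the
// chunk layout: everything between bottom() and the end of the stack, minus the
// frame metadata at the top of that area.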
inline int stackChunkOopDesc::argsize() const {
  assert(!is_empty(), "should not ask for argsize in empty chunk");
  return stack_size() - bottom() - frame::metadata_words_at_top;
}

inline HeapWord* stackChunkOopDesc::start_of_stack() const {
   return (HeapWord*)(cast_from_oop<intptr_t>(as_oop()) + InstanceStackChunkKlass::offset_of_stack());
}

inline intptr_t* stackChunkOopDesc::start_address() const { return (intptr_t*)start_of_stack(); }
inline intptr_t* stackChunkOopDesc::end_address() const { return start_address() + stack_size(); }
inline intptr_t* stackChunkOopDesc::bottom_address() const { return start_address() + bottom(); }
inline intptr_t* stackChunkOopDesc::sp_address()  const { return start_address() + sp(); }

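// Conversions between absolute addresses within the chunk's stack area and
// word offsets relative to start_address().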
inline int stackChunkOopDesc::to_offset(intptr_t* p) const {
  assert(is_in_chunk(p)
    || (p >= start_address() && (p - start_address()) <= stack_size() + frame::metadata_words),
    "p: " PTR_FORMAT " start: " PTR_FORMAT " end: " PTR_FORMAT, p2i(p), p2i(start_address()), p2i(bottom_address()));
  return (int)(p - start_address());
}

inline intptr_t* stackChunkOopDesc::from_offset(int offset) const {
  assert(offset <= stack_size(), "");
  return start_address() + offset;
}

inline bool stackChunkOopDesc::is_empty() const {
  assert(sp() <= bottom(), "");
  return sp() == bottom();
}

inline bool stackChunkOopDesc::is_in_chunk(void* p) const {
  HeapWord* start = (HeapWord*)start_address();
  HeapWord* end = start + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}

bool stackChunkOopDesc::is_usable_in_chunk(void* p) const {
  HeapWord* start = (HeapWord*)start_address() + sp() - frame::metadata_words_at_bottom;
  HeapWord* end = start + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}

inline bool stackChunkOopDesc::is_flag(uint8_t flag) const {
  return (flags() & flag) != 0;
}
inline bool stackChunkOopDesc::is_flag_acquire(uint8_t flag) const {
  return (flags_acquire() & flag) != 0;
}
inline void stackChunkOopDesc::set_flag(uint8_t flag, bool value) {
  uint32_t flags = this->flags();
  set_flags((uint8_t)(value ? flags |= flag : flags &= ~flag));
}
inline void stackChunkOopDesc::clear_flags(uint8_t flag) {
  uint32_t flags = this->flags();
  set_flags((uint8_t)(flags &= ~flag));
}
inline void stackChunkOopDesc::clear_flags() {
  set_flags(0);
}

inline bool stackChunkOopDesc::has_mixed_frames() const { return is_flag(FLAG_HAS_INTERPRETED_FRAMES); }
inline void stackChunkOopDesc::set_has_mixed_frames(bool value) {
  assert((flags() & ~(FLAG_HAS_INTERPRETED_FRAMES | FLAGS_PREEMPTED)) == 0, "other flags should not be set");
  set_flag(FLAG_HAS_INTERPRETED_FRAMES, value);
}

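// Records why the thread was preempted when it was frozen: blocked on monitorenter
// or in Object.wait. Read and cleared again by get_and_clear_preempt_kind().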
inline void stackChunkOopDesc::set_preempt_kind(int freeze_kind) {
  assert((flags() & FLAGS_PREEMPTED) == 0, "");
  assert(freeze_kind == freeze_on_monitorenter || freeze_kind == freeze_on_wait, "");
  uint8_t flag = freeze_kind == freeze_on_monitorenter ? FLAG_PREEMPTED_MONITORENTER : FLAG_PREEMPTED_WAIT;
  set_flag(flag, true);
}

inline int stackChunkOopDesc::get_and_clear_preempt_kind() {
  assert((is_flag(FLAG_PREEMPTED_MONITORENTER) && !is_flag(FLAG_PREEMPTED_WAIT))
         || (is_flag(FLAG_PREEMPTED_WAIT) && !is_flag(FLAG_PREEMPTED_MONITORENTER)), "");
  int kind = is_flag(FLAG_PREEMPTED_MONITORENTER) ? freeze_on_monitorenter : freeze_on_wait;
  clear_flags(FLAGS_PREEMPTED);
  return kind;
}

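// For a chunk belonging to a preempted (unmounted) thread, these reconstruct from the
// stored ObjectWaiter the monitor the thread is pending to enter (including re-entry
// after a notify or timeout during Object.wait) and the monitor it is waiting on.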
inline ObjectMonitor* stackChunkOopDesc::current_pending_monitor() const {
  ObjectWaiter* waiter = object_waiter();
  if (waiter != nullptr && (waiter->is_monitorenter() || (waiter->is_wait() && (waiter->at_reenter() || waiter->notified())))) {
    return waiter->monitor();
  }
  return nullptr;
}

inline ObjectMonitor* stackChunkOopDesc::current_waiting_monitor() const {
  ObjectWaiter* waiter = object_waiter();
  return waiter != nullptr && waiter->is_wait() ? waiter->monitor() : nullptr;
}

inline bool stackChunkOopDesc::has_lockstack() const         { return is_flag(FLAG_HAS_LOCKSTACK); }
inline void stackChunkOopDesc::set_has_lockstack(bool value) { set_flag(FLAG_HAS_LOCKSTACK, value); }

inline bool stackChunkOopDesc::is_gc_mode() const                  { return is_flag(FLAG_GC_MODE); }
inline bool stackChunkOopDesc::is_gc_mode_acquire() const          { return is_flag_acquire(FLAG_GC_MODE); }
inline void stackChunkOopDesc::set_gc_mode(bool value)             { set_flag(FLAG_GC_MODE, value); }

inline bool stackChunkOopDesc::has_bitmap() const                  { return is_flag(FLAG_HAS_BITMAP); }
inline void stackChunkOopDesc::set_has_bitmap(bool value)          { set_flag(FLAG_HAS_BITMAP, value); }

inline bool stackChunkOopDesc::has_thaw_slowpath_condition() const { return flags() != 0; }

inline bool stackChunkOopDesc::requires_barriers() {
  return Universe::heap()->requires_barriers(this);
}

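// Applies the GC barrier of type 'barrier' to the oops of the frame referenced by the
// stream; for Mixed walks, handle_deopted() is called first because frames frozen on
// the slow path may have been deoptimized.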
template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
void stackChunkOopDesc::do_barriers(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
  if (frame_kind == ChunkFrames::Mixed) {
    // we could freeze deopted frames in slow mode.
    f.handle_deopted();
  }
  do_barriers0<barrier>(f, map);
}

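// With lightweight locking (LM_LIGHTWEIGHT), a preempted thread's lock stack is stored
// in the first lockstack_size() slots of the chunk's stack area; visit each of those
// oop slots with the given closure.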
template <typename OopT, class StackChunkLockStackClosureType>
inline void stackChunkOopDesc::iterate_lockstack(StackChunkLockStackClosureType* closure) {
  if (LockingMode != LM_LIGHTWEIGHT) {
    return;
  }
  int cnt = lockstack_size();
  intptr_t* lockstart_addr = start_address();
  for (int i = 0; i < cnt; i++) {
    closure->do_oop((OopT*)&lockstart_addr[i]);
  }
}

template <class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  has_mixed_frames() ? iterate_stack<ChunkFrames::Mixed>(closure)
                     : iterate_stack<ChunkFrames::CompiledOnly>(closure);
}

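// Walks the chunk's frames, youngest to oldest, calling the closure's do_frame for
// each. If the top frame is a stub frame, its compiled caller is walked with a fully
// populated RegisterMap before switching back to the cheaper SmallRegisterMap for the
// remaining frames.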
template <ChunkFrames frame_kind, class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  const SmallRegisterMap* map = SmallRegisterMap::instance;
  assert(!map->in_cont(), "");

  StackChunkFrameStream<frame_kind> f(this);
  bool should_continue = true;

  if (f.is_stub()) {
    RegisterMap full_map(nullptr,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::skip,
                         RegisterMap::WalkContinuation::include);
    full_map.set_include_argument_oops(false);
    closure->do_frame(f, map);

    f.next(&full_map);
    if (f.is_done()) return;

    should_continue = closure->do_frame(f, &full_map);
    f.next(map);
  }
  assert(!f.is_stub(), "");

  for (; should_continue && !f.is_done(); f.next(map)) {
    if (frame_kind == ChunkFrames::Mixed) {
      // in slow mode we might freeze deoptimized frames
      f.handle_deopted();
    }
    should_continue = closure->do_frame(f, map);
  }
}

inline frame stackChunkOopDesc::relativize(frame fr)   const { relativize_frame(fr);   return fr; }
inline frame stackChunkOopDesc::derelativize(frame fr) const { derelativize_frame(fr); return fr; }

inline void* stackChunkOopDesc::gc_data() const {
  int stack_sz = stack_size();
  assert(stack_sz != 0, "stack should not be empty");

  // The gc data is located after the stack.
  return start_of_stack() + stack_sz;
}

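// View of the oop bitmap stored in the gc_data() area: one bit per potential oop slot
// in the stack (narrowOop-sized or oop-sized, depending on UseCompressedOops). Only
// meaningful once has_bitmap() is true.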
inline BitMapView stackChunkOopDesc::bitmap() const {
  HeapWord* bitmap_addr = static_cast<HeapWord*>(gc_data());
  int stack_sz = stack_size();
  size_t bitmap_size_in_bits = InstanceStackChunkKlass::bitmap_size_in_bits(stack_sz);

  BitMapView bitmap((BitMap::bm_word_t*)bitmap_addr, bitmap_size_in_bits);

  DEBUG_ONLY(bitmap.verify_range(bit_index_for(start_address()), bit_index_for(end_address()));)

  return bitmap;
}

inline BitMap::idx_t stackChunkOopDesc::bit_index_for(address p) const {
  return UseCompressedOops ? bit_index_for((narrowOop*)p) : bit_index_for((oop*)p);
}

template <typename OopT>
inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
  assert(is_aligned(p, alignof(OopT)), "should be aligned: " PTR_FORMAT, p2i(p));
  assert(p >= (OopT*)start_address(), "Address not in chunk");
  return p - (OopT*)start_address();
}

inline intptr_t* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return UseCompressedOops ? (intptr_t*)address_for_bit<narrowOop>(index) : (intptr_t*)address_for_bit<oop>(index);
}

template <typename OopT>
inline OopT* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return (OopT*)start_address() + index;
}

inline MemRegion stackChunkOopDesc::range() {
  return MemRegion((HeapWord*)this, size());
}

inline int stackChunkOopDesc::relativize_usp_offset(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame() || fr.cb()->is_runtime_stub(), "");
  assert(is_in_chunk(fr.unextended_sp()), "");

  intptr_t* base = fr.real_fp(); // equal to the caller's sp
  intptr_t* loc = (intptr_t*)((address)fr.unextended_sp() + usp_offset_in_bytes);
  assert(base > loc, "");
  return (int)(base - loc);
}

inline address stackChunkOopDesc::usp_offset_to_location(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame(), "");
  return (address)derelativize_address(fr.offset_unextended_sp()) + usp_offset_in_bytes;
}

inline address stackChunkOopDesc::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg) const {
  assert(fr.is_compiled_frame(), "");
  assert(map != nullptr, "");
  assert(map->stack_chunk() == as_oop(), "");

  // the offsets are saved in the map after going through relativize_usp_offset, so they are sp - loc, in words
  intptr_t offset = (intptr_t)map->location(reg, nullptr); // see usp_offset_to_index for the chunk case
  intptr_t* base = derelativize_address(fr.offset_sp());
  return (address)(base - offset);
}

inline Method* stackChunkOopDesc::interpreter_frame_method(const frame& fr) {
  return derelativize(fr).interpreter_frame_method();
}

inline address stackChunkOopDesc::interpreter_frame_bcp(const frame& fr) {
  return derelativize(fr).interpreter_frame_bcp();
}

inline intptr_t* stackChunkOopDesc::interpreter_frame_expression_stack_at(const frame& fr, int index) const {
  frame heap_frame = derelativize(fr);
  assert(heap_frame.is_heap_frame(), "must be");
  return heap_frame.interpreter_frame_expression_stack_at(index);
}

inline intptr_t* stackChunkOopDesc::interpreter_frame_local_at(const frame& fr, int index) const {
  frame heap_frame = derelativize(fr);
  assert(heap_frame.is_heap_frame(), "must be");
  return heap_frame.interpreter_frame_local_at(index);
}

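// Copies 'size' words from the thread stack into this chunk ('v' in the trace output
// refers to the thread stack, 'h' to the heap chunk); the asserts check that the
// destination range lies entirely within the chunk's stack area.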
inline void stackChunkOopDesc::copy_from_stack_to_chunk(intptr_t* from, intptr_t* to, int size) {
  log_develop_trace(continuations)("Copying from v: " PTR_FORMAT " - " PTR_FORMAT " (%d words, %d bytes)",
    p2i(from), p2i(from + size), size, size << LogBytesPerWord);
  log_develop_trace(continuations)("Copying to h: " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") - " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") (%d words, %d bytes)",
    p2i(to), to - start_address(), relative_base() - to, p2i(to + size), to + size - start_address(),
    relative_base() - (to + size), size, size << LogBytesPerWord);

  assert(to >= start_address(), "Chunk underflow");
  assert(to + size <= end_address(), "Chunk overflow");

#if !(defined(AMD64) || defined(AARCH64) || defined(RISCV64) || defined(PPC64)) || defined(ZERO)
  // Suppress compilation warning-as-error on unimplemented architectures
  // that stub out arch-specific methods. Some compilers are smart enough
  // to figure out the argument is always null and then warn about it.
  if (to != nullptr)
#endif
  memcpy(to, from, size << LogBytesPerWord);
}

inline void stackChunkOopDesc::copy_from_chunk_to_stack(intptr_t* from, intptr_t* to, int size) {
  log_develop_trace(continuations)("Copying from h: " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") - " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") (%d words, %d bytes)",
    p2i(from), from - start_address(), relative_base() - from, p2i(from + size), from + size - start_address(),
    relative_base() - (from + size), size, size << LogBytesPerWord);
  log_develop_trace(continuations)("Copying to v: " PTR_FORMAT " - " PTR_FORMAT " (%d words, %d bytes)", p2i(to),
    p2i(to + size), size, size << LogBytesPerWord);

  assert(from >= start_address(), "");
  assert(from + size <= end_address(), "");

#if !(defined(AMD64) || defined(AARCH64) || defined(RISCV64) || defined(PPC64)) || defined(ZERO)
  // Suppress compilation warning-as-error on unimplemented architectures
  // that stub out arch-specific methods. Some compilers are smart enough
  // to figure out the argument is always null and then warn about it.
  if (to != nullptr)
#endif
  memcpy(to, from, size << LogBytesPerWord);
}

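// Loads an oop (or narrowOop) from a slot within the chunk through the stack-chunk
// barrier set, which applies whatever load barrier the current GC requires.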
template <typename OopT>
inline oop stackChunkOopDesc::load_oop(OopT* addr) {
  return BarrierSet::barrier_set()->barrier_set_stack_chunk()->load_oop(this, addr);
}

inline intptr_t* stackChunkOopDesc::relative_base() const {
  // we relativize with respect to end rather than start because GC might compact the chunk
  return end_address() + frame::metadata_words;
}

inline intptr_t* stackChunkOopDesc::derelativize_address(int offset) const {
  intptr_t* base = relative_base();
  intptr_t* p = base - offset;
  assert(start_address() <= p && p <= base, "start_address: " PTR_FORMAT " p: " PTR_FORMAT " base: " PTR_FORMAT,
         p2i(start_address()), p2i(p), p2i(base));
  return p;
}

inline int stackChunkOopDesc::relativize_address(intptr_t* p) const {
  intptr_t* base = relative_base();
  intptr_t offset = base - p;
  assert(start_address() <= p && p <= base, "start_address: " PTR_FORMAT " p: " PTR_FORMAT " base: " PTR_FORMAT,
         p2i(start_address()), p2i(p), p2i(base));
  assert(0 <= offset && offset <= std::numeric_limits<int>::max(), "offset: " PTR_FORMAT, offset);
  return (int)offset;
}

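// Frames stored in a chunk keep their sp and unextended_sp as offsets from
// relative_base() (see above) so that they stay valid if the GC moves the chunk;
// these two functions convert a frame between the absolute and relative forms.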
inline void stackChunkOopDesc::relativize_frame(frame& fr) const {
  fr.set_offset_sp(relativize_address(fr.sp()));
  fr.set_offset_unextended_sp(relativize_address(fr.unextended_sp()));
  relativize_frame_pd(fr);
}

inline void stackChunkOopDesc::derelativize_frame(frame& fr) const {
  fr.set_sp(derelativize_address(fr.offset_sp()));
  fr.set_unextended_sp(derelativize_address(fr.offset_unextended_sp()));
  derelativize_frame_pd(fr);
  fr.set_frame_index(-1); // for the sake of assertions in frame
}

#endif // SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP