/*
 * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
#define SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP

#include "oops/stackChunkOop.hpp"

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetStackChunk.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gc_globals.hpp"
#include "memory/memRegion.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/instanceStackChunkKlass.inline.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/frame.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER_INLINE(stackChunkOop)

DEF_HANDLE_CONSTR(stackChunk, is_stackChunk_noinline)

inline stackChunkOop stackChunkOopDesc::cast(oop obj) {
  assert(obj == nullptr || obj->is_stackChunk(), "Wrong type");
  return stackChunkOop(obj);
}

inline stackChunkOop stackChunkOopDesc::parent() const { return stackChunkOopDesc::cast(jdk_internal_vm_StackChunk::parent(as_oop())); }
inline void stackChunkOopDesc::set_parent(stackChunkOop value) { jdk_internal_vm_StackChunk::set_parent(this, value); }
template<typename P>
inline void stackChunkOopDesc::set_parent_raw(oop value) { jdk_internal_vm_StackChunk::set_parent_raw<P>(this, value); }
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_parent_access(oop value) { jdk_internal_vm_StackChunk::set_parent_access<decorators>(this, value); }

inline int stackChunkOopDesc::stack_size() const { return jdk_internal_vm_StackChunk::size(as_oop()); }

inline int stackChunkOopDesc::bottom() const { return jdk_internal_vm_StackChunk::bottom(as_oop()); }
inline void stackChunkOopDesc::set_bottom(int value) { jdk_internal_vm_StackChunk::set_bottom(this, value); }

inline int stackChunkOopDesc::sp() const { return jdk_internal_vm_StackChunk::sp(as_oop()); }
inline void stackChunkOopDesc::set_sp(int value) { jdk_internal_vm_StackChunk::set_sp(this, value); }

inline address stackChunkOopDesc::pc() const { return jdk_internal_vm_StackChunk::pc(as_oop()); }
inline void stackChunkOopDesc::set_pc(address value) { jdk_internal_vm_StackChunk::set_pc(this, value); }

inline uint8_t stackChunkOopDesc::flags() const { return jdk_internal_vm_StackChunk::flags(as_oop()); }
inline void stackChunkOopDesc::set_flags(uint8_t value) { jdk_internal_vm_StackChunk::set_flags(this, value); }

inline uint8_t stackChunkOopDesc::flags_acquire() const { return jdk_internal_vm_StackChunk::flags_acquire(as_oop()); }

inline void stackChunkOopDesc::release_set_flags(uint8_t value) {
  jdk_internal_vm_StackChunk::release_set_flags(this, value);
}

inline bool stackChunkOopDesc::try_set_flags(uint8_t prev_flags, uint8_t new_flags) {
  return jdk_internal_vm_StackChunk::try_set_flags(this, prev_flags, new_flags);
}

inline int stackChunkOopDesc::max_thawing_size() const { return jdk_internal_vm_StackChunk::maxThawingSize(as_oop()); }
inline void stackChunkOopDesc::set_max_thawing_size(int value) {
  assert(value >= 0, "size must be >= 0");
  jdk_internal_vm_StackChunk::set_maxThawingSize(this, (jint)value);
}

inline uint8_t stackChunkOopDesc::lockstack_size() const { return jdk_internal_vm_StackChunk::lockStackSize(as_oop()); }
inline void stackChunkOopDesc::set_lockstack_size(uint8_t value) { jdk_internal_vm_StackChunk::set_lockStackSize(this, value); }

inline oop stackChunkOopDesc::cont() const { return jdk_internal_vm_StackChunk::cont(as_oop()); }
inline void stackChunkOopDesc::set_cont(oop value) { jdk_internal_vm_StackChunk::set_cont(this, value); }
template<typename P>
inline void stackChunkOopDesc::set_cont_raw(oop value) { jdk_internal_vm_StackChunk::set_cont_raw<P>(this, value); }
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_cont_access(oop value) { jdk_internal_vm_StackChunk::set_cont_access<decorators>(this, value); }

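// The size, in words, of the stack arguments at the bottom of the chunk,
// i.e. everything between bottom() and the end of the stack except the
// frame::metadata_words_at_top metadata words.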
inline int stackChunkOopDesc::argsize() const {
  assert(!is_empty(), "should not ask for argsize in empty chunk");
  return stack_size() - bottom() - frame::metadata_words_at_top;
}

inline HeapWord* stackChunkOopDesc::start_of_stack() const {
  return (HeapWord*)(cast_from_oop<intptr_t>(as_oop()) + InstanceStackChunkKlass::offset_of_stack());
}

inline intptr_t* stackChunkOopDesc::start_address() const { return (intptr_t*)start_of_stack(); }
inline intptr_t* stackChunkOopDesc::end_address() const { return start_address() + stack_size(); }
inline intptr_t* stackChunkOopDesc::bottom_address() const { return start_address() + bottom(); }
inline intptr_t* stackChunkOopDesc::sp_address() const { return start_address() + sp(); }

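// Positions within the chunk's stack are encoded as word offsets relative
// to start_address().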
inline int stackChunkOopDesc::to_offset(intptr_t* p) const {
  assert(is_in_chunk(p)
         || (p >= start_address() && (p - start_address()) <= stack_size() + frame::metadata_words),
         "p: " PTR_FORMAT " start: " PTR_FORMAT " end: " PTR_FORMAT, p2i(p), p2i(start_address()), p2i(bottom_address()));
  return (int)(p - start_address());
}

inline intptr_t* stackChunkOopDesc::from_offset(int offset) const {
  assert(offset <= stack_size(), "");
  return start_address() + offset;
}

inline bool stackChunkOopDesc::is_empty() const {
  assert(sp() <= bottom(), "");
  return sp() == bottom();
}

inline bool stackChunkOopDesc::is_in_chunk(void* p) const {
  HeapWord* start = (HeapWord*)start_address();
  HeapWord* end = start + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}

inline bool stackChunkOopDesc::is_usable_in_chunk(void* p) const {
  HeapWord* start = (HeapWord*)start_address() + sp() - frame::metadata_words_at_bottom;
  HeapWord* end = (HeapWord*)start_address() + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}

inline bool stackChunkOopDesc::is_flag(uint8_t flag) const {
  return (flags() & flag) != 0;
}
inline bool stackChunkOopDesc::is_flag_acquire(uint8_t flag) const {
  return (flags_acquire() & flag) != 0;
}
inline void stackChunkOopDesc::set_flag(uint8_t flag, bool value) {
  uint32_t flags = this->flags();
  set_flags((uint8_t)(value ? (flags | flag) : (flags & ~flag)));
}
inline void stackChunkOopDesc::clear_flags() {
  set_flags(0);
}

inline bool stackChunkOopDesc::has_mixed_frames() const { return is_flag(FLAG_HAS_INTERPRETED_FRAMES); }
inline void stackChunkOopDesc::set_has_mixed_frames(bool value) {
  assert((flags() & ~(FLAG_HAS_INTERPRETED_FRAMES | FLAG_PREEMPTED)) == 0, "other flags should not be set");
  set_flag(FLAG_HAS_INTERPRETED_FRAMES, value);
}

inline bool stackChunkOopDesc::preempted() const { return is_flag(FLAG_PREEMPTED); }
inline void stackChunkOopDesc::set_preempted(bool value) {
  assert(preempted() != value, "");
  set_flag(FLAG_PREEMPTED, value);
}

inline bool stackChunkOopDesc::at_klass_init() const { return jdk_internal_vm_StackChunk::atKlassInit(as_oop()); }
inline void stackChunkOopDesc::set_at_klass_init(bool value) {
  assert(at_klass_init() != value, "");
  jdk_internal_vm_StackChunk::set_atKlassInit(this, value);
}

inline bool stackChunkOopDesc::has_args_at_top() const { return jdk_internal_vm_StackChunk::hasArgsAtTop(as_oop()); }
inline void stackChunkOopDesc::set_has_args_at_top(bool value) {
  assert(has_args_at_top() != value, "");
  jdk_internal_vm_StackChunk::set_hasArgsAtTop(this, value);
}

inline bool stackChunkOopDesc::has_lockstack() const { return is_flag(FLAG_HAS_LOCKSTACK); }
inline void stackChunkOopDesc::set_has_lockstack(bool value) { set_flag(FLAG_HAS_LOCKSTACK, value); }

inline bool stackChunkOopDesc::is_gc_mode() const { return is_flag(FLAG_GC_MODE); }
inline bool stackChunkOopDesc::is_gc_mode_acquire() const { return is_flag_acquire(FLAG_GC_MODE); }
inline void stackChunkOopDesc::set_gc_mode(bool value) { set_flag(FLAG_GC_MODE, value); }

inline bool stackChunkOopDesc::has_bitmap() const { return is_flag(FLAG_HAS_BITMAP); }
inline void stackChunkOopDesc::set_has_bitmap(bool value) { set_flag(FLAG_HAS_BITMAP, value); }

inline bool stackChunkOopDesc::has_thaw_slowpath_condition() const { return flags() != 0; }

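// Whether stores into this chunk need GC barriers. This is delegated to the
// heap, which typically answers true once the chunk has left the allocating
// (young) region.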
inline bool stackChunkOopDesc::requires_barriers() {
  return Universe::heap()->requires_barriers(this);
}

template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
void stackChunkOopDesc::do_barriers(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
  if (frame_kind == ChunkFrames::Mixed) {
    // Frames may have been frozen in deoptimized form on the slow path.
    f.handle_deopted();
  }
  do_barriers0<barrier>(f, map);
}

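// If the mounted thread's lock stack was frozen into this chunk, its oops
// are stashed at the very start of the stack area; lockstack_size() gives
// their count.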
template <typename OopT, class StackChunkLockStackClosureType>
inline void stackChunkOopDesc::iterate_lockstack(StackChunkLockStackClosureType* closure) {
  int cnt = lockstack_size();
  intptr_t* lockstack_start = start_address();
  for (int i = 0; i < cnt; i++) {
    closure->do_oop((OopT*)&lockstack_start[i]);
  }
}

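// Dispatch to the appropriate walker; chunks known to hold only compiled
// frames can be walked on the faster CompiledOnly path.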
template <class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  has_mixed_frames() ? iterate_stack<ChunkFrames::Mixed>(closure)
                     : iterate_stack<ChunkFrames::CompiledOnly>(closure);
}

template <ChunkFrames frame_kind, class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  const auto* map = SmallRegisterMap::instance_no_args();
  assert(!map->in_cont(), "");

  StackChunkFrameStream<frame_kind> f(this);
  bool should_continue = true;

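  // If the chunk was frozen while running a stub (e.g. at a safepoint), walk
  // the stub's caller with a full RegisterMap, so that oops kept alive in
  // registers saved by the stub can be located.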
  if (f.is_stub()) {
    RegisterMap full_map(nullptr,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::skip,
                         RegisterMap::WalkContinuation::include);
    full_map.set_include_argument_oops(false);
    closure->do_frame(f, map);

    f.next(&full_map);
    assert(!f.is_done(), "");
    assert(f.is_compiled(), "");

    should_continue = closure->do_frame(f, &full_map);
    f.next(map);
  } else if (frame_kind == ChunkFrames::Mixed && f.is_interpreted() && has_args_at_top()) {
    should_continue = closure->do_frame(f, SmallRegisterMap::instance_with_args());
    f.next(map);
  }
  assert(!f.is_stub(), "");

  for (; should_continue && !f.is_done(); f.next(map)) {
    if (frame_kind == ChunkFrames::Mixed) {
      // On the slow path we might have frozen deoptimized frames.
      f.handle_deopted();
    }
    should_continue = closure->do_frame(f, map);
  }
}
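
// A closure passed to iterate_stack() is expected to provide (a sketch,
// inferred from the call sites above):
//
//   template <ChunkFrames frame_kind, typename RegisterMapT>
//   bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map);
//
// Returning false stops the iteration.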

inline frame stackChunkOopDesc::relativize(frame fr) const { relativize_frame(fr); return fr; }
inline frame stackChunkOopDesc::derelativize(frame fr) const { derelativize_frame(fr); return fr; }

inline void* stackChunkOopDesc::gc_data() const {
  int stack_sz = stack_size();
  assert(stack_sz != 0, "stack should not be empty");

  // The GC data is located after the stack.
  return start_of_stack() + stack_sz;
}

inline BitMapView stackChunkOopDesc::bitmap() const {
  HeapWord* bitmap_addr = static_cast<HeapWord*>(gc_data());
  int stack_sz = stack_size();
  size_t bitmap_size_in_bits = InstanceStackChunkKlass::bitmap_size_in_bits(stack_sz);

  BitMapView bitmap((BitMap::bm_word_t*)bitmap_addr, bitmap_size_in_bits);

  DEBUG_ONLY(bitmap.verify_range(bit_index_for(start_address()), bit_index_for(end_address()));)

  return bitmap;
}

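// The bitmap marks the oop slots within the stack area, one bit per slot;
// the slot width, and hence the bit index of an address, depends on
// UseCompressedOops.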
inline BitMap::idx_t stackChunkOopDesc::bit_index_for(address p) const {
  return UseCompressedOops ? bit_index_for((narrowOop*)p) : bit_index_for((oop*)p);
}

template <typename OopT>
inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
  assert(is_aligned(p, alignof(OopT)), "should be aligned: " PTR_FORMAT, p2i(p));
  assert(p >= (OopT*)start_address(), "Address not in chunk");
  return p - (OopT*)start_address();
}

inline intptr_t* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return UseCompressedOops ? (intptr_t*)address_for_bit<narrowOop>(index) : (intptr_t*)address_for_bit<oop>(index);
}

template <typename OopT>
inline OopT* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return (OopT*)start_address() + index;
}

inline MemRegion stackChunkOopDesc::range() {
  return MemRegion((HeapWord*)this, size());
}

inline int stackChunkOopDesc::relativize_usp_offset(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame() || fr.cb()->is_runtime_stub(), "");
  assert(is_in_chunk(fr.unextended_sp()), "");

  intptr_t* base = fr.real_fp(); // equal to the caller's sp
  intptr_t* loc = (intptr_t*)((address)fr.unextended_sp() + usp_offset_in_bytes);
  assert(base > loc, "");
  return (int)(base - loc);
}

inline address stackChunkOopDesc::usp_offset_to_location(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame(), "");
  return (address)derelativize_address(fr.offset_unextended_sp()) + usp_offset_in_bytes;
}

inline address stackChunkOopDesc::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg) const {
  assert(fr.is_compiled_frame(), "");
  assert(map != nullptr, "");
  assert(map->stack_chunk() == as_oop(), "");

  // The offsets are saved in the map after going through relativize_usp_offset,
  // so they are sp - loc, in words.
  intptr_t offset = (intptr_t)map->location(reg, nullptr); // see usp_offset_to_index for the chunk case
  intptr_t* base = derelativize_address(fr.offset_sp());
  return (address)(base - offset);
}

inline Method* stackChunkOopDesc::interpreter_frame_method(const frame& fr) {
  return derelativize(fr).interpreter_frame_method();
}

inline address stackChunkOopDesc::interpreter_frame_bcp(const frame& fr) {
  return derelativize(fr).interpreter_frame_bcp();
}

inline intptr_t* stackChunkOopDesc::interpreter_frame_expression_stack_at(const frame& fr, int index) const {
  frame heap_frame = derelativize(fr);
  assert(heap_frame.is_heap_frame(), "must be");
  return heap_frame.interpreter_frame_expression_stack_at(index);
}

inline intptr_t* stackChunkOopDesc::interpreter_frame_local_at(const frame& fr, int index) const {
  frame heap_frame = derelativize(fr);
  assert(heap_frame.is_heap_frame(), "must be");
  return heap_frame.interpreter_frame_local_at(index);
}

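// In the trace output below, "v" denotes the thread's native stack and "h"
// the heap chunk.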
inline void stackChunkOopDesc::copy_from_stack_to_chunk(intptr_t* from, intptr_t* to, int size) {
  log_develop_trace(continuations)("Copying from v: " PTR_FORMAT " - " PTR_FORMAT " (%d words, %d bytes)",
                                   p2i(from), p2i(from + size), size, size << LogBytesPerWord);
  log_develop_trace(continuations)("Copying to h: " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") - " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") (%d words, %d bytes)",
                                   p2i(to), to - start_address(), relative_base() - to, p2i(to + size), to + size - start_address(),
                                   relative_base() - (to + size), size, size << LogBytesPerWord);

  assert(to >= start_address(), "Chunk underflow");
  assert(to + size <= end_address(), "Chunk overflow");

#if !(defined(AMD64) || defined(AARCH64) || defined(RISCV64) || defined(PPC64)) || defined(ZERO)
  // Suppress compilation warning-as-error on unimplemented architectures
  // that stub out arch-specific methods. Some compilers are smart enough
  // to figure out the argument is always null and then warn about it.
  if (to != nullptr)
#endif
  memcpy(to, from, size << LogBytesPerWord);
}

inline void stackChunkOopDesc::copy_from_chunk_to_stack(intptr_t* from, intptr_t* to, int size) {
  log_develop_trace(continuations)("Copying from h: " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") - " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") (%d words, %d bytes)",
                                   p2i(from), from - start_address(), relative_base() - from, p2i(from + size), from + size - start_address(),
                                   relative_base() - (from + size), size, size << LogBytesPerWord);
  log_develop_trace(continuations)("Copying to v: " PTR_FORMAT " - " PTR_FORMAT " (%d words, %d bytes)", p2i(to),
                                   p2i(to + size), size, size << LogBytesPerWord);

  assert(from >= start_address(), "");
  assert(from + size <= end_address(), "");

#if !(defined(AMD64) || defined(AARCH64) || defined(RISCV64) || defined(PPC64)) || defined(ZERO)
  // Suppress compilation warning-as-error on unimplemented architectures
  // that stub out arch-specific methods. Some compilers are smart enough
  // to figure out the argument is always null and then warn about it.
  if (to != nullptr)
#endif
  memcpy(to, from, size << LogBytesPerWord);
}

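// Oop loads from a chunk go through the stack-chunk barrier set, so that
// each GC can apply the load barrier appropriate for the chunk's state.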
template <typename OopT>
inline oop stackChunkOopDesc::load_oop(OopT* addr) {
  return BarrierSet::barrier_set()->barrier_set_stack_chunk()->load_oop(this, addr);
}

inline intptr_t* stackChunkOopDesc::relative_base() const {
  // We relativize with respect to the end rather than the start because the
  // GC might compact the chunk.
  return end_address() + frame::metadata_words;
}
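
// Example: a frame's sp p is stored as the word offset relative_base() - p;
// derelativize_address() then recovers p from that offset, and the result
// stays correct even if the GC has moved the chunk in the meantime.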

inline intptr_t* stackChunkOopDesc::derelativize_address(int offset) const {
  intptr_t* base = relative_base();
  intptr_t* p = base - offset;
  assert(start_address() <= p && p <= base, "start_address: " PTR_FORMAT " p: " PTR_FORMAT " base: " PTR_FORMAT,
         p2i(start_address()), p2i(p), p2i(base));
  return p;
}

inline int stackChunkOopDesc::relativize_address(intptr_t* p) const {
  intptr_t* base = relative_base();
  intptr_t offset = base - p;
  assert(start_address() <= p && p <= base, "start_address: " PTR_FORMAT " p: " PTR_FORMAT " base: " PTR_FORMAT,
         p2i(start_address()), p2i(p), p2i(base));
  assert(0 <= offset && offset <= std::numeric_limits<int>::max(), "offset: " PTR_FORMAT, offset);
  return (int)offset;
}

inline void stackChunkOopDesc::relativize_frame(frame& fr) const {
  fr.set_offset_sp(relativize_address(fr.sp()));
  fr.set_offset_unextended_sp(relativize_address(fr.unextended_sp()));
  relativize_frame_pd(fr);
}

inline void stackChunkOopDesc::derelativize_frame(frame& fr) const {
  fr.set_sp(derelativize_address(fr.offset_sp()));
  fr.set_unextended_sp(derelativize_address(fr.offset_unextended_sp()));
  derelativize_frame_pd(fr);
  fr.set_frame_index(-1); // for the sake of assertions in frame
}

#endif // SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP