/*
 * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP
#define SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP

#include "oops/stackChunkOop.hpp"

#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetStackChunk.hpp"
#include "gc/shared/gc_globals.hpp"
#include "memory/memRegion.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/instanceStackChunkKlass.inline.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "utilities/macros.hpp"
#include CPU_HEADER_INLINE(stackChunkOop)

DEF_HANDLE_CONSTR(stackChunk, is_stackChunk_noinline)

inline stackChunkOop stackChunkOopDesc::cast(oop obj) {
  assert(obj == nullptr || obj->is_stackChunk(), "Wrong type");
  return stackChunkOop(obj);
}

inline stackChunkOop stackChunkOopDesc::parent() const { return stackChunkOopDesc::cast(jdk_internal_vm_StackChunk::parent(as_oop())); }
inline void stackChunkOopDesc::set_parent(stackChunkOop value) { jdk_internal_vm_StackChunk::set_parent(this, value); }
template<typename P>
inline void stackChunkOopDesc::set_parent_raw(oop value) { jdk_internal_vm_StackChunk::set_parent_raw<P>(this, value); }
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_parent_access(oop value) { jdk_internal_vm_StackChunk::set_parent_access<decorators>(this, value); }

inline int stackChunkOopDesc::stack_size() const { return jdk_internal_vm_StackChunk::size(as_oop()); }

inline int stackChunkOopDesc::bottom() const { return jdk_internal_vm_StackChunk::bottom(as_oop()); }
inline void stackChunkOopDesc::set_bottom(int value) { jdk_internal_vm_StackChunk::set_bottom(this, value); }

inline int stackChunkOopDesc::sp() const { return jdk_internal_vm_StackChunk::sp(as_oop()); }
inline void stackChunkOopDesc::set_sp(int value) { jdk_internal_vm_StackChunk::set_sp(this, value); }

inline address stackChunkOopDesc::pc() const { return jdk_internal_vm_StackChunk::pc(as_oop()); }
inline void stackChunkOopDesc::set_pc(address value) { jdk_internal_vm_StackChunk::set_pc(this, value); }

inline uint8_t stackChunkOopDesc::flags() const { return jdk_internal_vm_StackChunk::flags(as_oop()); }
inline void stackChunkOopDesc::set_flags(uint8_t value) { jdk_internal_vm_StackChunk::set_flags(this, value); }

inline uint8_t stackChunkOopDesc::flags_acquire() const { return jdk_internal_vm_StackChunk::flags_acquire(as_oop()); }

inline void stackChunkOopDesc::release_set_flags(uint8_t value) {
  jdk_internal_vm_StackChunk::release_set_flags(this, value);
}

inline bool stackChunkOopDesc::try_set_flags(uint8_t prev_flags, uint8_t new_flags) {
  return jdk_internal_vm_StackChunk::try_set_flags(this, prev_flags, new_flags);
}

inline int stackChunkOopDesc::max_thawing_size() const { return jdk_internal_vm_StackChunk::maxThawingSize(as_oop()); }
inline void stackChunkOopDesc::set_max_thawing_size(int value) {
  assert(value >= 0, "size must be >= 0");
  jdk_internal_vm_StackChunk::set_maxThawingSize(this, (jint)value);
}

inline oop stackChunkOopDesc::cont() const {
  if (UseZGC && !ZGenerational) {
    assert(!UseCompressedOops, "Non-generational ZGC does not support compressed oops");
    // The state of the cont oop is used by XCollectedHeap::requires_barriers
    // to determine the age of the stackChunkOopDesc. For that to work, only
    // the GC is allowed to perform a load barrier on the oop. This function
    // is used by non-GC code and therefore creates a stack-local copy of the
    // oop and performs the load barrier on that copy instead.
    oop obj = jdk_internal_vm_StackChunk::cont_raw<oop>(as_oop());
    obj = (oop)NativeAccess<>::oop_load(&obj);
    return obj;
  }
  return jdk_internal_vm_StackChunk::cont(as_oop());
}
inline void stackChunkOopDesc::set_cont(oop value) { jdk_internal_vm_StackChunk::set_cont(this, value); }
template<typename P>
inline void stackChunkOopDesc::set_cont_raw(oop value) { jdk_internal_vm_StackChunk::set_cont_raw<P>(this, value); }
template<DecoratorSet decorators>
inline void stackChunkOopDesc::set_cont_access(oop value) { jdk_internal_vm_StackChunk::set_cont_access<decorators>(this, value); }

inline int stackChunkOopDesc::argsize() const {
  assert(!is_empty(), "should not ask for argsize in empty chunk");
  return stack_size() - bottom() - frame::metadata_words_at_top;
}
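
// Added note (not in the original source): the chunk's stack area lives at a
// fixed offset from the chunk oop itself (InstanceStackChunkKlass::offset_of_stack()).
// The helpers below translate the word offsets stored in the chunk (sp(), bottom(),
// stack_size()) into absolute addresses within that area.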

inline HeapWord* stackChunkOopDesc::start_of_stack() const {
  return (HeapWord*)(cast_from_oop<intptr_t>(as_oop()) + InstanceStackChunkKlass::offset_of_stack());
}

inline intptr_t* stackChunkOopDesc::start_address() const { return (intptr_t*)start_of_stack(); }
inline intptr_t* stackChunkOopDesc::end_address() const { return start_address() + stack_size(); }
inline intptr_t* stackChunkOopDesc::bottom_address() const { return start_address() + bottom(); }
inline intptr_t* stackChunkOopDesc::sp_address() const { return start_address() + sp(); }

inline int stackChunkOopDesc::to_offset(intptr_t* p) const {
  assert(is_in_chunk(p)
         || (p >= start_address() && (p - start_address()) <= stack_size() + frame::metadata_words),
         "p: " PTR_FORMAT " start: " PTR_FORMAT " end: " PTR_FORMAT, p2i(p), p2i(start_address()), p2i(bottom_address()));
  return (int)(p - start_address());
}

inline intptr_t* stackChunkOopDesc::from_offset(int offset) const {
  assert(offset <= stack_size(), "");
  return start_address() + offset;
}

inline bool stackChunkOopDesc::is_empty() const {
  assert(sp() <= bottom(), "");
  return sp() == bottom();
}

inline bool stackChunkOopDesc::is_in_chunk(void* p) const {
  HeapWord* start = (HeapWord*)start_address();
  HeapWord* end = start + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}

bool stackChunkOopDesc::is_usable_in_chunk(void* p) const {
  HeapWord* start = (HeapWord*)start_address() + sp() - frame::metadata_words_at_bottom;
  HeapWord* end = start + stack_size();
  return (HeapWord*)p >= start && (HeapWord*)p < end;
}

inline bool stackChunkOopDesc::is_flag(uint8_t flag) const {
  return (flags() & flag) != 0;
}
inline bool stackChunkOopDesc::is_flag_acquire(uint8_t flag) const {
  return (flags_acquire() & flag) != 0;
}
inline void stackChunkOopDesc::set_flag(uint8_t flag, bool value) {
  uint32_t flags = this->flags();
  set_flags((uint8_t)(value ? flags |= flag : flags &= ~flag));
}
inline void stackChunkOopDesc::clear_flags() {
  set_flags(0);
}

inline bool stackChunkOopDesc::has_mixed_frames() const { return is_flag(FLAG_HAS_INTERPRETED_FRAMES); }
inline void stackChunkOopDesc::set_has_mixed_frames(bool value) {
  assert((flags() & ~FLAG_HAS_INTERPRETED_FRAMES) == 0, "other flags should not be set");
  set_flag(FLAG_HAS_INTERPRETED_FRAMES, value);
}

inline bool stackChunkOopDesc::is_gc_mode() const { return is_flag(FLAG_GC_MODE); }
inline bool stackChunkOopDesc::is_gc_mode_acquire() const { return is_flag_acquire(FLAG_GC_MODE); }
inline void stackChunkOopDesc::set_gc_mode(bool value) { set_flag(FLAG_GC_MODE, value); }

inline bool stackChunkOopDesc::has_bitmap() const { return is_flag(FLAG_HAS_BITMAP); }
inline void stackChunkOopDesc::set_has_bitmap(bool value) { set_flag(FLAG_HAS_BITMAP, value); }

inline bool stackChunkOopDesc::has_thaw_slowpath_condition() const { return flags() != 0; }

inline bool stackChunkOopDesc::requires_barriers() {
  return Universe::heap()->requires_barriers(this);
}

template <stackChunkOopDesc::BarrierType barrier, ChunkFrames frame_kind, typename RegisterMapT>
void stackChunkOopDesc::do_barriers(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
  if (frame_kind == ChunkFrames::Mixed) {
    // we could freeze deopted frames in slow mode.
    f.handle_deopted();
  }
  do_barriers0<barrier>(f, map);
}

template <class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  has_mixed_frames() ? iterate_stack<ChunkFrames::Mixed>(closure)
                     : iterate_stack<ChunkFrames::CompiledOnly>(closure);
}

template <ChunkFrames frame_kind, class StackChunkFrameClosureType>
inline void stackChunkOopDesc::iterate_stack(StackChunkFrameClosureType* closure) {
  const SmallRegisterMap* map = SmallRegisterMap::instance();
  assert(!map->in_cont(), "");

  StackChunkFrameStream<frame_kind> f(this);
  bool should_continue = true;

  if (f.is_stub()) {
    RegisterMap full_map(nullptr,
                         RegisterMap::UpdateMap::include,
                         RegisterMap::ProcessFrames::skip,
                         RegisterMap::WalkContinuation::include);
    full_map.set_include_argument_oops(false);

    f.next(&full_map);

    assert(!f.is_done(), "");
    assert(f.is_compiled(), "");

    should_continue = closure->do_frame(f, &full_map);
    f.next(map);
    f.handle_deopted(); // the stub caller might be deoptimized (as it's not at a call)
  }
  assert(!f.is_stub(), "");

  for (; should_continue && !f.is_done(); f.next(map)) {
    if (frame_kind == ChunkFrames::Mixed) {
      // in slow mode we might freeze deoptimized frames
      f.handle_deopted();
    }
    should_continue = closure->do_frame(f, map);
  }
}
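
// Added note (not in the original source): a minimal sketch of a closure accepted
// by iterate_stack(). The type name and body below are illustrative only; the
// required shape follows from the calls above: do_frame() is handed the current
// StackChunkFrameStream and a register map (a SmallRegisterMap*, or a full
// RegisterMap* for the caller of a stub frame), and returns false to stop the walk.
//
//   class SampleFrameClosure {
//   public:
//     template <ChunkFrames frame_kind, typename RegisterMapT>
//     bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
//       // inspect the current frame via f and map here
//       return true; // keep iterating
//     }
//   };
//
//   // chunk->iterate_stack(&closure) selects Mixed or CompiledOnly from the chunk's flags.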

inline frame stackChunkOopDesc::relativize(frame fr) const { relativize_frame(fr); return fr; }
inline frame stackChunkOopDesc::derelativize(frame fr) const { derelativize_frame(fr); return fr; }

inline void* stackChunkOopDesc::gc_data() const {
  int stack_sz = stack_size();
  assert(stack_sz != 0, "stack should not be empty");

  // The gc data is located after the stack.
  return start_of_stack() + stack_sz;
}
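
// Added note (not in the original source): bitmap() interprets the gc data as an
// oop bitmap covering the stack area: one bit per potential oop slot, indexed
// relative to start_address(). Slots are narrowOop-sized when compressed oops are
// in use and oop-sized otherwise (see bit_index_for() and address_for_bit()).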

inline BitMapView stackChunkOopDesc::bitmap() const {
  HeapWord* bitmap_addr = static_cast<HeapWord*>(gc_data());
  int stack_sz = stack_size();
  size_t bitmap_size_in_bits = InstanceStackChunkKlass::bitmap_size_in_bits(stack_sz);

  BitMapView bitmap((BitMap::bm_word_t*)bitmap_addr, bitmap_size_in_bits);

  DEBUG_ONLY(bitmap.verify_range(bit_index_for(start_address()), bit_index_for(end_address()));)

  return bitmap;
}

inline BitMap::idx_t stackChunkOopDesc::bit_index_for(address p) const {
  return UseCompressedOops ? bit_index_for((narrowOop*)p) : bit_index_for((oop*)p);
}

template <typename OopT>
inline BitMap::idx_t stackChunkOopDesc::bit_index_for(OopT* p) const {
  assert(is_aligned(p, alignof(OopT)), "should be aligned: " PTR_FORMAT, p2i(p));
  assert(p >= (OopT*)start_address(), "Address not in chunk");
  return p - (OopT*)start_address();
}

inline intptr_t* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return UseCompressedOops ? (intptr_t*)address_for_bit<narrowOop>(index) : (intptr_t*)address_for_bit<oop>(index);
}

template <typename OopT>
inline OopT* stackChunkOopDesc::address_for_bit(BitMap::idx_t index) const {
  return (OopT*)start_address() + index;
}

inline MemRegion stackChunkOopDesc::range() {
  return MemRegion((HeapWord*)this, size());
}

inline int stackChunkOopDesc::relativize_usp_offset(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame() || fr.cb()->is_safepoint_stub(), "");
  assert(is_in_chunk(fr.unextended_sp()), "");

  intptr_t* base = fr.real_fp(); // equal to the caller's sp
  intptr_t* loc = (intptr_t*)((address)fr.unextended_sp() + usp_offset_in_bytes);
  assert(base > loc, "");
  return (int)(base - loc);
}

inline address stackChunkOopDesc::usp_offset_to_location(const frame& fr, const int usp_offset_in_bytes) const {
  assert(fr.is_compiled_frame(), "");
  return (address)derelativize_address(fr.offset_unextended_sp()) + usp_offset_in_bytes;
}

inline address stackChunkOopDesc::reg_to_location(const frame& fr, const RegisterMap* map, VMReg reg) const {
  assert(fr.is_compiled_frame(), "");
  assert(map != nullptr, "");
  assert(map->stack_chunk() == as_oop(), "");

  // the offsets are saved in the map after going through relativize_usp_offset, so they are sp - loc, in words
  intptr_t offset = (intptr_t)map->location(reg, nullptr); // see usp_offset_to_index for the chunk case
  intptr_t* base = derelativize_address(fr.offset_sp());
  return (address)(base - offset);
}

inline Method* stackChunkOopDesc::interpreter_frame_method(const frame& fr) {
  return derelativize(fr).interpreter_frame_method();
}

inline address stackChunkOopDesc::interpreter_frame_bcp(const frame& fr) {
  return derelativize(fr).interpreter_frame_bcp();
}

inline intptr_t* stackChunkOopDesc::interpreter_frame_expression_stack_at(const frame& fr, int index) const {
  frame heap_frame = derelativize(fr);
  assert(heap_frame.is_heap_frame(), "must be");
  return heap_frame.interpreter_frame_expression_stack_at(index);
}

inline intptr_t* stackChunkOopDesc::interpreter_frame_local_at(const frame& fr, int index) const {
  frame heap_frame = derelativize(fr);
  assert(heap_frame.is_heap_frame(), "must be");
  return heap_frame.interpreter_frame_local_at(index);
}

inline void stackChunkOopDesc::copy_from_stack_to_chunk(intptr_t* from, intptr_t* to, int size) {
  log_develop_trace(continuations)("Copying from v: " PTR_FORMAT " - " PTR_FORMAT " (%d words, %d bytes)",
                                   p2i(from), p2i(from + size), size, size << LogBytesPerWord);
  log_develop_trace(continuations)("Copying to h: " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") - " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") (%d words, %d bytes)",
                                   p2i(to), to - start_address(), relative_base() - to, p2i(to + size), to + size - start_address(),
                                   relative_base() - (to + size), size, size << LogBytesPerWord);

  assert(to >= start_address(), "Chunk underflow");
  assert(to + size <= end_address(), "Chunk overflow");

#if !(defined(AMD64) || defined(AARCH64) || defined(RISCV64) || defined(PPC64)) || defined(ZERO)
  // Suppress compilation warning-as-error on unimplemented architectures
  // that stub out arch-specific methods. Some compilers are smart enough
  // to figure out the argument is always null and then warn about it.
  if (to != nullptr)
#endif
  memcpy(to, from, size << LogBytesPerWord);
}

inline void stackChunkOopDesc::copy_from_chunk_to_stack(intptr_t* from, intptr_t* to, int size) {
  log_develop_trace(continuations)("Copying from h: " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") - " PTR_FORMAT "(" INTPTR_FORMAT "," INTPTR_FORMAT ") (%d words, %d bytes)",
                                   p2i(from), from - start_address(), relative_base() - from, p2i(from + size), from + size - start_address(),
                                   relative_base() - (from + size), size, size << LogBytesPerWord);
  log_develop_trace(continuations)("Copying to v: " PTR_FORMAT " - " PTR_FORMAT " (%d words, %d bytes)", p2i(to),
                                   p2i(to + size), size, size << LogBytesPerWord);

  assert(from >= start_address(), "");
  assert(from + size <= end_address(), "");

#if !(defined(AMD64) || defined(AARCH64) || defined(RISCV64) || defined(PPC64)) || defined(ZERO)
  // Suppress compilation warning-as-error on unimplemented architectures
  // that stub out arch-specific methods. Some compilers are smart enough
  // to figure out the argument is always null and then warn about it.
  if (to != nullptr)
#endif
  memcpy(to, from, size << LogBytesPerWord);
}

template <typename OopT>
inline oop stackChunkOopDesc::load_oop(OopT* addr) {
  return BarrierSet::barrier_set()->barrier_set_stack_chunk()->load_oop(this, addr);
}
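
// Added note (not in the original source): addresses inside the chunk are
// relativized as positive word offsets measured downward from relative_base(),
// i.e. relativize_address(p) == relative_base() - p and
// derelativize_address(offset) == relative_base() - offset. See the comment in
// relative_base() for why offsets are anchored at the end of the stack area
// rather than at its start.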

inline intptr_t* stackChunkOopDesc::relative_base() const {
  // we relativize with respect to end rather than start because GC might compact the chunk
  return end_address() + frame::metadata_words;
}

inline intptr_t* stackChunkOopDesc::derelativize_address(int offset) const {
  intptr_t* base = relative_base();
  intptr_t* p = base - offset;
  assert(start_address() <= p && p <= base, "start_address: " PTR_FORMAT " p: " PTR_FORMAT " base: " PTR_FORMAT,
         p2i(start_address()), p2i(p), p2i(base));
  return p;
}

inline int stackChunkOopDesc::relativize_address(intptr_t* p) const {
  intptr_t* base = relative_base();
  intptr_t offset = base - p;
  assert(start_address() <= p && p <= base, "start_address: " PTR_FORMAT " p: " PTR_FORMAT " base: " PTR_FORMAT,
         p2i(start_address()), p2i(p), p2i(base));
  assert(0 <= offset && offset <= std::numeric_limits<int>::max(), "offset: " PTR_FORMAT, offset);
  return (int)offset;
}

inline void stackChunkOopDesc::relativize_frame(frame& fr) const {
  fr.set_offset_sp(relativize_address(fr.sp()));
  fr.set_offset_unextended_sp(relativize_address(fr.unextended_sp()));
  relativize_frame_pd(fr);
}

inline void stackChunkOopDesc::derelativize_frame(frame& fr) const {
  fr.set_sp(derelativize_address(fr.offset_sp()));
  fr.set_unextended_sp(derelativize_address(fr.offset_unextended_sp()));
  derelativize_frame_pd(fr);
  fr.set_frame_index(-1); // for the sake of assertions in frame
}

#endif // SHARE_OOPS_STACKCHUNKOOP_INLINE_HPP