/*
 * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_CONTINUATIONFREEZETHAW_AARCH64_INLINE_HPP
#define CPU_AARCH64_CONTINUATIONFREEZETHAW_AARCH64_INLINE_HPP

#include "code/codeBlob.inline.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "runtime/frame.hpp"
#include "runtime/frame.inline.hpp"


inline void patch_callee_link(const frame& f, intptr_t* fp) {
  DEBUG_ONLY(intptr_t* orig = *ContinuationHelper::Frame::callee_link_address(f));
  *ContinuationHelper::Frame::callee_link_address(f) = fp;
}

inline void patch_callee_link_relative(const frame& f, intptr_t* fp) {
  intptr_t* la = (intptr_t*)ContinuationHelper::Frame::callee_link_address(f);
  intptr_t new_value = fp - la;
  *la = new_value;
}

////// Freeze

// Fast path

inline void FreezeBase::patch_stack_pd(intptr_t* frame_sp, intptr_t* heap_sp) {
  // copy the spilled fp from the heap to the stack
  *(frame_sp - frame::sender_sp_offset) = *(heap_sp - frame::sender_sp_offset);
}

// Slow path

template<typename FKind>
inline frame FreezeBase::sender(const frame& f) {
  assert(FKind::is_instance(f), "");
  if (FKind::interpreted) {
    return frame(f.sender_sp(), f.interpreter_frame_sender_sp(), f.link(), f.sender_pc());
  }
  intptr_t** link_addr = link_address<FKind>(f);

  intptr_t* sender_sp = (intptr_t*)(link_addr + frame::sender_sp_offset); // f.unextended_sp() + (fsize/wordSize); //
  address sender_pc = ContinuationHelper::return_address_at(sender_sp - 1);
  assert(sender_sp != f.sp(), "must have changed");

  int slot = 0;
  CodeBlob* sender_cb = CodeCache::find_blob_and_oopmap(sender_pc, slot);

  // Repair the sender sp if the frame has been extended
  if (sender_cb != nullptr && sender_cb->is_nmethod()) {
    sender_sp = f.repair_sender_sp(sender_sp, link_addr);
  }

  return sender_cb != nullptr
    ? frame(sender_sp, sender_sp, *link_addr, sender_pc, sender_cb,
            slot == -1 ? nullptr : sender_cb->oop_map_for_slot(slot, sender_pc),
            false /* on_heap ? */)
    : frame(sender_sp, sender_sp, *link_addr, sender_pc);
}
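
// Creates the copy of frame f that will live in the heap chunk (_cont.tail()): computes where the
// copy goes relative to the already-frozen caller and updates the caller's sp accordingly. The
// per-frame metadata on aarch64 is the return pc (at sp - 1) and the saved fp
// (at sp - frame::sender_sp_offset); for interpreted frames the locals pointer is kept
// relativized so it stays valid inside the chunk.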
template<typename FKind>
frame FreezeBase::new_heap_frame(frame& f, frame& caller, int size_adjust) {
  assert(FKind::is_instance(f), "");
  assert(!caller.is_interpreted_frame()
         || caller.unextended_sp() == (intptr_t*)caller.at(frame::interpreter_frame_last_sp_offset), "");

  intptr_t *sp, *fp; // sp is really our unextended_sp
  if (FKind::interpreted) {
    assert((intptr_t*)f.at(frame::interpreter_frame_last_sp_offset) == nullptr
           || f.unextended_sp() == (intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset), "");
    intptr_t locals_offset = *f.addr_at(frame::interpreter_frame_locals_offset);
    // If caller.is_empty(), i.e. we're freezing into an empty chunk, then we set
    // the chunk's argsize in finalize_freeze and make room for it above the unextended_sp
    bool overlap_caller = caller.is_interpreted_frame() || caller.is_empty();
    fp = caller.unextended_sp() - 1 - locals_offset + (overlap_caller ? ContinuationHelper::InterpretedFrame::stack_argsize(f) : 0);
    sp = fp - (f.fp() - f.unextended_sp());
    assert(sp <= fp, "");
    assert(fp <= caller.unextended_sp(), "");
    caller.set_sp(fp + frame::sender_sp_offset);

    assert(_cont.tail()->is_in_chunk(sp), "");

    frame hf(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
    // copy relativized locals from the stack frame
    *hf.addr_at(frame::interpreter_frame_locals_offset) = locals_offset;
    return hf;
  } else {
    // For a compiled frame we need to re-read fp out of the frame because it may be an
    // oop and we might have had a safepoint in finalize_freeze, after constructing f.
    // For stub/native frames the value is not used while frozen, and will be constructed again
    // when thawing the frame (see ThawBase::new_stack_frame). We use a special bad address to
    // help with debugging, particularly when inspecting frames and identifying invalid accesses.
    fp = FKind::compiled ? *(intptr_t**)(f.sp() - frame::sender_sp_offset) : (intptr_t*)badAddressVal;

    int fsize = FKind::size(f);
    sp = caller.unextended_sp() - fsize - size_adjust;
    if (caller.is_interpreted_frame() && size_adjust == 0) {
      // If the caller is interpreted, our stackargs are not supposed to overlap with it,
      // so we make more room by moving sp down by argsize
      int argsize = FKind::stack_argsize(f);
      sp -= argsize;
    }
    caller.set_sp(sp + fsize);

    assert(_cont.tail()->is_in_chunk(sp), "");

    return frame(sp, sp, fp, f.pc(), nullptr, nullptr, true /* on_heap */);
  }
}
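
// The real unextended_sp of an interpreted frame is whatever the interpreter saved in
// interpreter_frame_last_sp (stored relativized). If that slot is null there is nothing to
// adjust: unextended_sp == sp, as the assert below checks.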
void FreezeBase::adjust_interpreted_frame_unextended_sp(frame& f) {
  assert((f.at(frame::interpreter_frame_last_sp_offset) != 0) || (f.unextended_sp() == f.sp()), "");
  intptr_t* real_unextended_sp = (intptr_t*)f.at_relative_or_null(frame::interpreter_frame_last_sp_offset);
  if (real_unextended_sp != nullptr) {
    f.set_unextended_sp(real_unextended_sp); // can be null at a safepoint
  }
}

inline void FreezeBase::prepare_freeze_interpreted_top_frame(frame& f) {
  assert(f.interpreter_frame_last_sp() == nullptr, "should be null for top frame");
  f.interpreter_frame_set_last_sp(f.unextended_sp());
}

inline void FreezeBase::relativize_interpreted_frame_metadata(const frame& f, const frame& hf) {
  assert(hf.fp() == hf.unextended_sp() + (f.fp() - f.unextended_sp()), "");
  assert((f.at(frame::interpreter_frame_last_sp_offset) != 0)
         || (f.unextended_sp() == f.sp()), "");
  assert(f.fp() > (intptr_t*)f.at_relative(frame::interpreter_frame_initial_sp_offset), "");

  // On AARCH64, we may insert padding between the locals and the rest of the frame
  // (see TemplateInterpreterGenerator::generate_normal_entry and AbstractInterpreter::layout_activation).
  // Because we freeze that padding word (see recurse_freeze_interpreted_frame), the relativized
  // locals value stays the same, so we don't need to change it here.

  // Make sure that last_sp is already relativized.
  assert((intptr_t*)hf.at_relative(frame::interpreter_frame_last_sp_offset) == hf.unextended_sp(), "");

  // Make sure that monitor_block_top is already relativized.
  assert(hf.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");

  // extended_sp is already relativized by TemplateInterpreterGenerator::generate_normal_entry or
  // AbstractInterpreter::layout_activation

  // The interpreter native wrapper code adds space in the stack equal to size_of_parameters()
  // after the fixed part of the frame. For wait0 this is equal to 3 words (this + long parameter).
  // We adjust by this size since otherwise the saved last sp will be less than the extended_sp.
  DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
  DEBUG_ONLY(int extra_space = m->is_object_wait0() ? m->size_of_parameters() : 0;)

  assert((hf.fp() - hf.unextended_sp()) == (f.fp() - f.unextended_sp()), "");
  assert(hf.unextended_sp() == (intptr_t*)hf.at(frame::interpreter_frame_last_sp_offset), "");
  assert(hf.unextended_sp() <= (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
  assert(hf.unextended_sp() + extra_space > (intptr_t*)hf.at(frame::interpreter_frame_extended_sp_offset), "");
  assert(hf.fp() > (intptr_t*)hf.at(frame::interpreter_frame_initial_sp_offset), "");
  assert(hf.fp() <= (intptr_t*)hf.at(frame::interpreter_frame_locals_offset), "");
}
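
// Writes the metadata of the topmost frozen frame into the chunk: the pc into the return-pc slot
// just below the frame (hf.sp() - 1) and the fp into the saved-fp slot
// (hf.sp() - frame::sender_sp_offset). For interpreted frames the fp is stored relativized (as an
// offset from the slot itself) so it stays valid if the chunk moves.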
inline void FreezeBase::set_top_frame_metadata_pd(const frame& hf) {
  stackChunkOop chunk = _cont.tail();
  assert(chunk->is_in_chunk(hf.sp() - 1), "");
  assert(chunk->is_in_chunk(hf.sp() - frame::sender_sp_offset), "");

  *(hf.sp() - 1) = (intptr_t)hf.pc();

  intptr_t* fp_addr = hf.sp() - frame::sender_sp_offset;
  *fp_addr = hf.is_interpreted_frame() ? (intptr_t)(hf.fp() - fp_addr)
                                       : (intptr_t)hf.fp();
}

inline void FreezeBase::patch_pd(frame& hf, const frame& caller, bool is_bottom_frame) {
  if (caller.is_interpreted_frame()) {
    assert(!caller.is_empty(), "");
    patch_callee_link_relative(caller, caller.fp());
  } else if (is_bottom_frame && caller.pc() != nullptr) {
    assert(caller.is_compiled_frame(), "");
    // If this is the bottom-most frame frozen in this freeze, the caller may have stayed frozen
    // in the chunk, with its fp slot, which may contain an oop, already fixed. We've just
    // overwritten that slot while copying this frame, so we must patch it back to the value
    // read from the chunk.
    patch_callee_link(caller, caller.fp());
  }
}

inline void FreezeBase::patch_pd_unused(intptr_t* sp) {
  intptr_t* fp_addr = sp - frame::sender_sp_offset;
  *fp_addr = badAddressVal;
}

//////// Thaw

// Fast path

inline void ThawBase::prefetch_chunk_pd(void* start, int size) {
  size <<= LogBytesPerWord;
  Prefetch::read(start, size);
  Prefetch::read(start, size - 64);
}

template <typename ConfigT>
inline void Thaw<ConfigT>::patch_caller_links(intptr_t* sp, intptr_t* bottom) {
  // Fast path depends on !PreserveFramePointer. See can_thaw_fast().
  assert(!PreserveFramePointer, "Frame pointers need to be fixed");
}

// Slow path

inline frame ThawBase::new_entry_frame() {
  intptr_t* sp = _cont.entrySP();
  return frame(sp, sp, _cont.entryFP(), _cont.entryPC()); // TODO PERF: This finds code blob and computes deopt state
}
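
// Creates the frame that hf (a frame in the heap chunk) will occupy on the thread stack once
// thawed: its position is computed below the already-thawed caller and the caller's sp is updated
// accordingly. Since the aarch64 ABI requires sp to be 16-byte aligned (frame::frame_alignment),
// an extra alignment word may be inserted; see the fp/frame_sp adjustment below and
// ThawBase::align().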
template<typename FKind> frame ThawBase::new_stack_frame(const frame& hf, frame& caller, bool bottom, int size_adjust) {
  assert(FKind::is_instance(hf), "");
  // The values in the returned frame object will be written into the callee's stack in patch.

  if (FKind::interpreted) {
    intptr_t* heap_sp = hf.unextended_sp();
    // If the caller is interpreted, it already made room for the callee arguments
    int overlap = caller.is_interpreted_frame() ? ContinuationHelper::InterpretedFrame::stack_argsize(hf) : 0;
    const int fsize = (int)(ContinuationHelper::InterpretedFrame::frame_bottom(hf) - hf.unextended_sp() - overlap);
    intptr_t* frame_sp = caller.unextended_sp() - fsize;
    intptr_t* fp = frame_sp + (hf.fp() - heap_sp);
    if ((intptr_t)fp % frame::frame_alignment != 0) {
      fp--;
      frame_sp--;
      log_develop_trace(continuations)("Adding internal interpreted frame alignment");
    }
    DEBUG_ONLY(intptr_t* unextended_sp = fp + *hf.addr_at(frame::interpreter_frame_last_sp_offset);)
    assert(frame_sp == unextended_sp, "");
    caller.set_sp(fp + frame::sender_sp_offset);
    frame f(frame_sp, frame_sp, fp, hf.pc());
    // we need to set the locals so that the caller of new_stack_frame() can call
    // ContinuationHelper::InterpretedFrame::frame_bottom
    // copy relativized locals from the heap frame
    *f.addr_at(frame::interpreter_frame_locals_offset) = *hf.addr_at(frame::interpreter_frame_locals_offset);
    assert((intptr_t)f.fp() % frame::frame_alignment == 0, "");
    return f;
  } else {
    int fsize = FKind::size(hf);
    intptr_t* frame_sp = caller.unextended_sp() - fsize - size_adjust;
    if (bottom || caller.is_interpreted_frame()) {
      if (size_adjust == 0) {
        int argsize = FKind::stack_argsize(hf);
        frame_sp -= argsize;
      }
      frame_sp = align(hf, frame_sp, caller, bottom);
    }
    caller.set_sp(frame_sp + fsize);
    assert(is_aligned(frame_sp, frame::frame_alignment), "");

    assert(hf.cb() != nullptr, "");
    assert(hf.oop_map() != nullptr, "");
    intptr_t* fp;
    if (PreserveFramePointer) {
      // we need to recreate a "real" frame pointer, pointing into the stack
      fp = frame_sp + fsize - frame::sender_sp_offset;
    } else {
      fp = FKind::stub || FKind::native
        ? frame_sp + fsize - frame::sender_sp_offset // fp always points to the address below the pushed return pc. We need the correct address.
        : *(intptr_t**)(hf.sp() - frame::sender_sp_offset); // we need to re-read fp because it may be an oop and we might have fixed the frame.
    }
    return frame(frame_sp, frame_sp, fp, hf.pc(), hf.cb(), hf.oop_map(), false); // TODO PERF: this computes deopt state; is it necessary?
  }
}

inline intptr_t* ThawBase::align(const frame& hf, intptr_t* frame_sp, frame& caller, bool bottom) {
#ifdef _LP64
  if (((intptr_t)frame_sp & 0xf) != 0) {
    assert(caller.is_interpreted_frame() || (bottom && hf.compiled_frame_stack_argsize() % 2 != 0), "");
    frame_sp--;
  }
  assert(is_aligned(frame_sp, frame::frame_alignment), "");
#endif
  return frame_sp;
}
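
// Patches the callee's saved-fp slot (the link) below the given caller after thawing. A real
// frame pointer into the stack is only needed when the caller is interpreted or
// PreserveFramePointer is on; otherwise the value already copied from the chunk is left
// untouched.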
inline void ThawBase::patch_pd(frame& f, const frame& caller) {
  if (caller.is_interpreted_frame() || PreserveFramePointer) {
    patch_callee_link(caller, caller.fp());
  }
}

inline void ThawBase::patch_pd(frame& f, intptr_t* caller_sp) {
  intptr_t* fp = caller_sp - frame::sender_sp_offset;
  patch_callee_link(f, fp);
}

inline intptr_t* ThawBase::push_cleanup_continuation() {
  frame enterSpecial = new_entry_frame();
  intptr_t* sp = enterSpecial.sp();

  sp[-1] = (intptr_t)ContinuationEntry::cleanup_pc();
  sp[-2] = (intptr_t)enterSpecial.fp();

  log_develop_trace(continuations, preempt)("push_cleanup_continuation initial sp: " INTPTR_FORMAT " final sp: " INTPTR_FORMAT, p2i(sp + 2 * frame::metadata_words), p2i(sp));
  return sp;
}

inline void ThawBase::derelativize_interpreted_frame_metadata(const frame& hf, const frame& f) {
  // Make sure that last_sp is kept relativized.
  assert((intptr_t*)f.at_relative(frame::interpreter_frame_last_sp_offset) == f.unextended_sp(), "");

  // Make sure that monitor_block_top is still relativized.
  assert(f.at_absolute(frame::interpreter_frame_monitor_block_top_offset) <= frame::interpreter_frame_initial_sp_offset, "");

  // Make sure that extended_sp is kept relativized.
  DEBUG_ONLY(Method* m = hf.interpreter_frame_method();)
  DEBUG_ONLY(int extra_space = m->is_object_wait0() ? m->size_of_parameters() : 0;) // see comment in relativize_interpreted_frame_metadata()
  assert((intptr_t*)f.at_relative(frame::interpreter_frame_extended_sp_offset) < f.unextended_sp() + extra_space, "");
}

#endif // CPU_AARCH64_CONTINUATIONFREEZETHAW_AARCH64_INLINE_HPP