/*
 * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/cdsConfig.hpp"
#include "cds/serializeClosure.hpp"
#include "classfile/vmClasses.hpp"
#include "compiler/oopMap.inline.hpp"
#include "gc/shared/gc_globals.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceStackChunkKlass.inline.hpp"
#include "oops/stackChunkOop.inline.hpp"
#include "runtime/continuation.hpp"
#include "runtime/continuationJavaClasses.inline.hpp"
#include "runtime/frame.hpp"
#include "runtime/handles.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/smallRegisterMap.inline.hpp"
#include "runtime/stackChunkFrameStream.inline.hpp"
#include "utilities/devirtualizer.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"

int InstanceStackChunkKlass::_offset_of_stack = 0;

#if INCLUDE_CDS
void InstanceStackChunkKlass::serialize_offsets(SerializeClosure* f) {
  f->do_int(&_offset_of_stack);
}
#endif

InstanceStackChunkKlass::InstanceStackChunkKlass() {
  assert(CDSConfig::is_dumping_static_archive() || CDSConfig::is_using_archive(), "only for CDS");
}

InstanceStackChunkKlass::InstanceStackChunkKlass(const ClassFileParser& parser)
  : InstanceKlass(parser, Kind) {
  // Change the layout_helper to use the slow path because StackChunkOops are
  // variable sized InstanceOops.
  const jint lh = Klass::instance_layout_helper(size_helper(), true);
  set_layout_helper(lh);
}

size_t InstanceStackChunkKlass::oop_size(oop obj) const {
  return instance_size(jdk_internal_vm_StackChunk::size(obj));
}

#ifndef PRODUCT
void InstanceStackChunkKlass::oop_print_on(oop obj, outputStream* st) {
  print_chunk(stackChunkOopDesc::cast(obj), false, st);
}
#endif

template<typename OopClosureType>
class StackChunkOopIterateFilterClosure: public OopClosure {
private:
  OopClosureType* const _closure;
  MemRegion _bound;

public:

  StackChunkOopIterateFilterClosure(OopClosureType* closure, MemRegion bound)
    : _closure(closure),
      _bound(bound) {}

  virtual void do_oop(oop* p)       override { do_oop_work(p); }
  virtual void do_oop(narrowOop* p) override { do_oop_work(p); }

  template <typename T>
  void do_oop_work(T* p) {
    if (_bound.contains(p)) {
      Devirtualizer::do_oop(_closure, p);
    }
  }
};

class DoMethodsStackChunkFrameClosure {
  OopIterateClosure* _closure;

public:
  DoMethodsStackChunkFrameClosure(OopIterateClosure* cl) : _closure(cl) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    if (f.is_interpreted()) {
      Method* m = f.to_frame().interpreter_frame_method();
      _closure->do_method(m);
    } else if (f.is_compiled()) {
      nmethod* nm = f.cb()->as_nmethod();
      // The do_nmethod function takes care of having the right synchronization
      // when keeping the nmethod alive during concurrent execution.
      _closure->do_nmethod(nm);
      // There is no need to mark the Method, as class redefinition will walk the
      // CodeCache, noting their Methods
    }
    return true;
  }
};

void InstanceStackChunkKlass::do_methods(stackChunkOop chunk, OopIterateClosure* cl) {
  DoMethodsStackChunkFrameClosure closure(cl);
  chunk->iterate_stack(&closure);
}

class OopIterateStackChunkFrameClosure {
  OopIterateClosure* const _closure;
  MemRegion _bound;
  const bool _do_metadata;

public:
  OopIterateStackChunkFrameClosure(OopIterateClosure* closure, MemRegion mr)
    : _closure(closure),
      _bound(mr),
      _do_metadata(_closure->do_metadata()) {
    assert(_closure != nullptr, "must be set");
  }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    if (_do_metadata) {
      DoMethodsStackChunkFrameClosure(_closure).do_frame(f, map);
    }

    StackChunkOopIterateFilterClosure<OopIterateClosure> cl(_closure, _bound);
    f.iterate_oops(&cl, map);

    return true;
  }
};

void InstanceStackChunkKlass::oop_oop_iterate_stack_slow(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr) {
  if (UseZGC || UseShenandoahGC) {
    // An OopClosure could apply barriers to a stack chunk. The side effects
    // of the load barriers could destroy derived pointers, which must be
    // processed before their base oop is processed. So we force processing
    // of derived pointers before applying the closures.
    chunk->relativize_derived_pointers_concurrently();
  }
  OopIterateStackChunkFrameClosure frame_closure(closure, mr);
  chunk->iterate_stack(&frame_closure);
}

template <typename OopT>
void InstanceStackChunkKlass::oop_oop_iterate_lockstack(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr) {
  StackChunkOopIterateFilterClosure<OopIterateClosure> cl(closure, mr);
  if (chunk->has_bitmap()) {
    chunk->iterate_lockstack<OopT>(&cl);
  } else {
    chunk->iterate_lockstack<oop>(&cl);
  }
}

template void InstanceStackChunkKlass::oop_oop_iterate_lockstack<oop>(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr);
template void InstanceStackChunkKlass::oop_oop_iterate_lockstack<narrowOop>(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr);

#ifdef ASSERT

class DescribeStackChunkClosure {
  stackChunkOop _chunk;
  FrameValues _values;
  RegisterMap _map;
  int _frame_no;

public:
  DescribeStackChunkClosure(stackChunkOop chunk)
    : _chunk(chunk),
      _map(nullptr,
           RegisterMap::UpdateMap::include,
           RegisterMap::ProcessFrames::skip,
           RegisterMap::WalkContinuation::include),
      _frame_no(0) {
    _map.set_include_argument_oops(false);
  }

  const RegisterMap* get_map(const RegisterMap* map,      intptr_t* sp) { return map; }
  const RegisterMap* get_map(const SmallRegisterMap* map, intptr_t* sp) { return map->copy_to_RegisterMap(&_map, sp); }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    ResetNoHandleMark rnhm;
    HandleMark hm(Thread::current());

    frame fr = f.to_frame();
    fr.describe(_values, _frame_no++, get_map(map, f.sp()));
    return true;
  }

  void describe_chunk() {
    // _values.describe(-1, _chunk->start_address(), "CHUNK START");
    _values.describe(-1, _chunk->sp_address(),         "CHUNK SP");
    _values.describe(-1, _chunk->bottom_address() - 1, "CHUNK ARGS");
    _values.describe(-1, _chunk->end_address() - 1,    "CHUNK END");
  }

  void print_on(outputStream* out) {
    if (_frame_no > 0) {
      describe_chunk();
      _values.print_on(_chunk, out);
    } else {
      out->print_cr(" EMPTY");
    }
  }
};
#endif

class PrintStackChunkClosure {
  outputStream* _st;

public:
  PrintStackChunkClosure(outputStream* st) : _st(st) {}

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& fs, const RegisterMapT* map) {
    frame f = fs.to_frame();
    _st->print_cr("-- frame sp: " PTR_FORMAT " interpreted: %d size: %d argsize: %d",
                  p2i(fs.sp()), fs.is_interpreted(), f.frame_size(),
                  fs.is_interpreted() || fs.is_stub() ?
                    0 : f.compiled_frame_stack_argsize());
#ifdef ASSERT
    f.print_value_on(_st);
#else
    f.print_on(_st);
#endif
    const ImmutableOopMap* oopmap = fs.oopmap();
    if (oopmap != nullptr) {
      oopmap->print_on(_st);
      _st->cr();
    }
    return true;
  }
};

void InstanceStackChunkKlass::print_chunk(const stackChunkOop c, bool verbose, outputStream* st) {
  if (c == nullptr) {
    st->print_cr("CHUNK null");
    return;
  }

  st->print_cr("CHUNK " PTR_FORMAT " " PTR_FORMAT " - " PTR_FORMAT " :: " INTPTR_FORMAT,
               p2i(c), p2i(c->start_address()), p2i(c->end_address()), c->identity_hash());
  st->print_cr(" barriers: %d gc_mode: %d bitmap: %d parent: " PTR_FORMAT,
               c->requires_barriers(), c->is_gc_mode(), c->has_bitmap(), p2i(c->parent()));
  st->print_cr(" flags mixed: %d", c->has_mixed_frames());
  st->print_cr(" size: %d bottom: %d max_size: %d sp: %d pc: " PTR_FORMAT,
               c->stack_size(), c->bottom(), c->max_thawing_size(), c->sp(), p2i(c->pc()));

  if (verbose) {
    st->cr();
    st->print_cr("------ chunk frames end: " PTR_FORMAT, p2i(c->bottom_address()));
    PrintStackChunkClosure closure(st);
    c->iterate_stack(&closure);
    st->print_cr("------");

#ifdef ASSERT
    ResourceMark rm;
    DescribeStackChunkClosure describe(c);
    c->iterate_stack(&describe);
    describe.print_on(st);
    st->print_cr("======");
#endif
  }
}

void InstanceStackChunkKlass::init_offset_of_stack() {
  // Cache the byte offset at which the stack area starts within a StackChunk
  // instance, i.e. right after its fixed-size fields.
  assert(_offset_of_stack == 0, "once");
  _offset_of_stack = cast(vmClasses::StackChunk_klass())->size_helper() << LogHeapWordSize;
}