1 /*
2 * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/cdsConfig.hpp"
27 #include "cds/serializeClosure.hpp"
28 #include "classfile/vmClasses.hpp"
29 #include "compiler/oopMap.inline.hpp"
30 #include "gc/shared/gc_globals.hpp"
31 #include "memory/oopFactory.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "oops/instanceStackChunkKlass.inline.hpp"
34 #include "oops/stackChunkOop.inline.hpp"
35 #include "runtime/continuation.hpp"
36 #include "runtime/continuationJavaClasses.inline.hpp"
37 #include "runtime/frame.hpp"
38 #include "runtime/handles.hpp"
39 #include "runtime/registerMap.hpp"
40 #include "runtime/smallRegisterMap.inline.hpp"
41 #include "runtime/stackChunkFrameStream.inline.hpp"
42 #include "utilities/devirtualizer.inline.hpp"
43 #include "utilities/globalDefinitions.hpp"
44 #include "utilities/macros.hpp"
45 #include "utilities/ostream.hpp"
46
// Byte offset (from the start of a StackChunk instance) at which the chunk's
// stack storage begins. Set once by init_offset_of_stack() and visited by
// serialize_offsets() for CDS.
int InstanceStackChunkKlass::_offset_of_stack = 0;
48
#if INCLUDE_CDS
// Visits the cached stack offset so it is written to / read from the CDS
// archive together with the other serialized VM state.
void InstanceStackChunkKlass::serialize_offsets(SerializeClosure* f) {
  f->do_int(&_offset_of_stack);
}
#endif
54
// Default constructor, reachable only while dumping or using a CDS archive;
// normal class loading goes through the ClassFileParser constructor below.
InstanceStackChunkKlass::InstanceStackChunkKlass() {
  assert(CDSConfig::is_dumping_static_archive() || CDSConfig::is_using_archive(), "only for CDS");
}
58
59 InstanceStackChunkKlass::InstanceStackChunkKlass(const ClassFileParser& parser)
60 : InstanceKlass(parser, Kind) {
61 // Change the layout_helper to use the slow path because StackChunkOops are
62 // variable sized InstanceOops.
63 const jint lh = Klass::instance_layout_helper(size_helper(), true);
64 set_layout_helper(lh);
65 }
66
67 size_t InstanceStackChunkKlass::oop_size(oop obj) const {
68 return instance_size(jdk_internal_vm_StackChunk::size(obj));
69 }
70
71 #ifndef PRODUCT
72 void InstanceStackChunkKlass::oop_print_on(oop obj, outputStream* st) {
73 print_chunk(stackChunkOopDesc::cast(obj), false, st);
74 }
75 #endif
76
77 template<typename OopClosureType>
78 class StackChunkOopIterateFilterClosure: public OopClosure {
79 private:
80 OopClosureType* const _closure;
81 MemRegion _bound;
82
83 public:
84
85 StackChunkOopIterateFilterClosure(OopClosureType* closure, MemRegion bound)
86 : _closure(closure),
87 _bound(bound) {}
88
89 virtual void do_oop(oop* p) override { do_oop_work(p); }
90 virtual void do_oop(narrowOop* p) override { do_oop_work(p); }
91
92 template <typename T>
93 void do_oop_work(T* p) {
94 if (_bound.contains(p)) {
95 Devirtualizer::do_oop(_closure, p);
96 }
97 }
98 };
99
100 class DoMethodsStackChunkFrameClosure {
101 OopIterateClosure* _closure;
102
103 public:
104 DoMethodsStackChunkFrameClosure(OopIterateClosure* cl) : _closure(cl) {}
105
106 template <ChunkFrames frame_kind, typename RegisterMapT>
107 bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
108 if (f.is_interpreted()) {
109 Method* m = f.to_frame().interpreter_frame_method();
110 _closure->do_method(m);
111 } else if (f.is_compiled()) {
112 nmethod* nm = f.cb()->as_nmethod();
113 // The do_nmethod function takes care of having the right synchronization
114 // when keeping the nmethod alive during concurrent execution.
115 _closure->do_nmethod(nm);
116 // There is no need to mark the Method, as class redefinition will walk the
117 // CodeCache, noting their Methods
118 }
119 return true;
120 }
121 };
122
123 void InstanceStackChunkKlass::do_methods(stackChunkOop chunk, OopIterateClosure* cl) {
124 DoMethodsStackChunkFrameClosure closure(cl);
125 chunk->iterate_stack(&closure);
126 }
127
128 class OopIterateStackChunkFrameClosure {
129 OopIterateClosure* const _closure;
130 MemRegion _bound;
131 const bool _do_metadata;
132
133 public:
134 OopIterateStackChunkFrameClosure(OopIterateClosure* closure, MemRegion mr)
135 : _closure(closure),
136 _bound(mr),
137 _do_metadata(_closure->do_metadata()) {
138 assert(_closure != nullptr, "must be set");
139 }
140
141 template <ChunkFrames frame_kind, typename RegisterMapT>
142 bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
143 if (_do_metadata) {
144 DoMethodsStackChunkFrameClosure(_closure).do_frame(f, map);
145 }
146
147 StackChunkOopIterateFilterClosure<OopIterateClosure> cl(_closure, _bound);
148 f.iterate_oops(&cl, map);
149
150 return true;
151 }
152 };
153
154 void InstanceStackChunkKlass::oop_oop_iterate_stack_slow(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr) {
155 if (UseZGC || UseShenandoahGC) {
156 // An OopClosure could apply barriers to a stack chunk. The side effects
157 // of the load barriers could destroy derived pointers, which must be
158 // processed before their base oop is processed. So we force processing
159 // of derived pointers before applying the closures.
160 chunk->relativize_derived_pointers_concurrently();
161 }
162 OopIterateStackChunkFrameClosure frame_closure(closure, mr);
163 chunk->iterate_stack(&frame_closure);
164 }
165
#ifdef ASSERT

// Debug-only frame visitor: collects an annotated, slot-by-slot picture of a
// chunk's stack in a FrameValues and prints it. Used by print_chunk() when
// verbose, in debug builds only.
class DescribeStackChunkClosure {
  stackChunkOop _chunk;
  FrameValues _values;
  RegisterMap _map;   // scratch map, filled in when a SmallRegisterMap is expanded
  int _frame_no;      // frames visited so far; also used as the describe() index

public:
  DescribeStackChunkClosure(stackChunkOop chunk)
    : _chunk(chunk),
      _map(nullptr,
           RegisterMap::UpdateMap::include,
           RegisterMap::ProcessFrames::skip,
           RegisterMap::WalkContinuation::include),
      _frame_no(0) {
    _map.set_include_argument_oops(false);
  }

  // frame::describe() needs a full RegisterMap: pass one through unchanged,
  // or expand a SmallRegisterMap into the scratch _map. Overload resolution
  // picks the right variant for the RegisterMapT supplied by the stack walk.
  const RegisterMap* get_map(const RegisterMap* map, intptr_t* sp) { return map; }
  const RegisterMap* get_map(const SmallRegisterMap* map, intptr_t* sp) { return map->copy_to_RegisterMap(&_map, sp); }

  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    // Permit Handle allocation here even if a caller holds a NoHandleMark;
    // the HandleMark then cleans up any handles describe() creates.
    ResetNoHandleMark rnhm;
    HandleMark hm(Thread::current());

    frame fr = f.to_frame();
    fr.describe(_values, _frame_no++, get_map(map, f.sp()));
    return true;
  }

  // Annotate the chunk's key addresses in the collected picture.
  void describe_chunk() {
    // _values.describe(-1, _chunk->start_address(), "CHUNK START");
    _values.describe(-1, _chunk->sp_address(), "CHUNK SP");
    _values.describe(-1, _chunk->bottom_address() - 1, "CHUNK ARGS");
    _values.describe(-1, _chunk->end_address() - 1, "CHUNK END");
  }

  // Print the collected description, or " EMPTY" if no frames were visited.
  void print_on(outputStream* out) {
    if (_frame_no > 0) {
      describe_chunk();
      _values.print_on(_chunk, out);
    } else {
      out->print_cr(" EMPTY");
    }
  }
};
#endif
215
216 class PrintStackChunkClosure {
217 outputStream* _st;
218
219 public:
220 PrintStackChunkClosure(outputStream* st) : _st(st) {}
221
222 template <ChunkFrames frame_kind, typename RegisterMapT>
223 bool do_frame(const StackChunkFrameStream<frame_kind>& fs, const RegisterMapT* map) {
224 frame f = fs.to_frame();
225 _st->print_cr("-- frame sp: " PTR_FORMAT " interpreted: %d size: %d argsize: %d",
226 p2i(fs.sp()), fs.is_interpreted(), f.frame_size(),
227 fs.is_interpreted() ? 0 : f.compiled_frame_stack_argsize());
228 #ifdef ASSERT
229 f.print_value_on(_st, nullptr);
230 #else
231 f.print_on(_st);
232 #endif
233 const ImmutableOopMap* oopmap = fs.oopmap();
234 if (oopmap != nullptr) {
235 oopmap->print_on(_st);
236 _st->cr();
237 }
238 return true;
239 }
240 };
241
242 void InstanceStackChunkKlass::print_chunk(const stackChunkOop c, bool verbose, outputStream* st) {
243 if (c == nullptr) {
244 st->print_cr("CHUNK null");
245 return;
246 }
247
248 st->print_cr("CHUNK " PTR_FORMAT " " PTR_FORMAT " - " PTR_FORMAT " :: " INTPTR_FORMAT,
249 p2i(c), p2i(c->start_address()), p2i(c->end_address()), c->identity_hash());
250 st->print_cr(" barriers: %d gc_mode: %d bitmap: %d parent: " PTR_FORMAT,
251 c->requires_barriers(), c->is_gc_mode(), c->has_bitmap(), p2i(c->parent()));
252 st->print_cr(" flags mixed: %d", c->has_mixed_frames());
253 st->print_cr(" size: %d bottom: %d max_size: %d sp: %d pc: " PTR_FORMAT,
254 c->stack_size(), c->bottom(), c->max_thawing_size(), c->sp(), p2i(c->pc()));
255
256 if (verbose) {
257 st->cr();
258 st->print_cr("------ chunk frames end: " PTR_FORMAT, p2i(c->bottom_address()));
259 PrintStackChunkClosure closure(st);
260 c->iterate_stack(&closure);
261 st->print_cr("------");
262
263 #ifdef ASSERT
264 ResourceMark rm;
265 DescribeStackChunkClosure describe(c);
266 c->iterate_stack(&describe);
267 describe.print_on(st);
268 st->print_cr("======");
269 #endif
270 }
271 }
272
273 void InstanceStackChunkKlass::init_offset_of_stack() {
274 // Cache the offset of the static fields in the Class instance
275 assert(_offset_of_stack == 0, "once");
276 _offset_of_stack = cast(vmClasses::StackChunk_klass())->size_helper() << LogHeapWordSize;
277 }
--- EOF ---