1 /*
  2  * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "cds/cdsConfig.hpp"
 27 #include "cds/serializeClosure.hpp"
 28 #include "classfile/vmClasses.hpp"
 29 #include "compiler/oopMap.inline.hpp"
 30 #include "gc/shared/gc_globals.hpp"
 31 #include "memory/oopFactory.hpp"
 32 #include "memory/resourceArea.hpp"
 33 #include "oops/instanceStackChunkKlass.inline.hpp"
 34 #include "oops/stackChunkOop.inline.hpp"
 35 #include "runtime/continuation.hpp"
 36 #include "runtime/continuationJavaClasses.inline.hpp"
 37 #include "runtime/frame.hpp"
 38 #include "runtime/handles.hpp"
 39 #include "runtime/registerMap.hpp"
 40 #include "runtime/smallRegisterMap.inline.hpp"
 41 #include "runtime/stackChunkFrameStream.inline.hpp"
 42 #include "utilities/devirtualizer.inline.hpp"
 43 #include "utilities/globalDefinitions.hpp"
 44 #include "utilities/macros.hpp"
 45 #include "utilities/ostream.hpp"
 46 
// Byte offset (from the start of the object) of the variable-sized stack area
// in a stackChunkOop. Set once by init_offset_of_stack(), and stored in /
// restored from the CDS archive via serialize_offsets().
int InstanceStackChunkKlass::_offset_of_stack = 0;
 48 
#if INCLUDE_CDS
// Writes (when dumping) or reads (when loading) the cached stack-area offset
// to/from the CDS archive, so it does not need to be recomputed on restore.
void InstanceStackChunkKlass::serialize_offsets(SerializeClosure* f) {
  f->do_int(&_offset_of_stack);
}
#endif
 54 
// Default constructor: only legal while dumping or using a CDS archive
// (asserted below); regular class loading goes through the
// ClassFileParser constructor.
InstanceStackChunkKlass::InstanceStackChunkKlass() {
  assert(CDSConfig::is_dumping_static_archive() || CDSConfig::is_using_archive(), "only for CDS");
}
 58 
// Regular constructor, used when jdk.internal.vm.StackChunk is loaded from
// a class file.
InstanceStackChunkKlass::InstanceStackChunkKlass(const ClassFileParser& parser)
  : InstanceKlass(parser, Kind) {
  // Change the layout_helper to use the slow path because StackChunkOops are
  // variable sized InstanceOops: their actual size must be read from the
  // object itself (see oop_size() below) rather than from the klass.
  const jint lh = Klass::instance_layout_helper(size_helper(), true);
  set_layout_helper(lh);
}
 66 
 67 size_t InstanceStackChunkKlass::oop_size(oop obj) const {
 68   return instance_size(jdk_internal_vm_StackChunk::size(obj));
 69 }
 70 
#ifndef PRODUCT
// Debug-only object printing: delegates to print_chunk() in non-verbose mode.
void InstanceStackChunkKlass::oop_print_on(oop obj, outputStream* st) {
  print_chunk(stackChunkOopDesc::cast(obj), false, st);
}
#endif
 76 
 77 template<typename OopClosureType>
 78 class StackChunkOopIterateFilterClosure: public OopClosure {
 79 private:
 80   OopClosureType* const _closure;
 81   MemRegion _bound;
 82 
 83 public:
 84 
 85   StackChunkOopIterateFilterClosure(OopClosureType* closure, MemRegion bound)
 86     : _closure(closure),
 87       _bound(bound) {}
 88 
 89   virtual void do_oop(oop* p)       override { do_oop_work(p); }
 90   virtual void do_oop(narrowOop* p) override { do_oop_work(p); }
 91 
 92   template <typename T>
 93   void do_oop_work(T* p) {
 94     if (_bound.contains(p)) {
 95       Devirtualizer::do_oop(_closure, p);
 96     }
 97   }
 98 };
 99 
// Frame closure that visits the code metadata of every frame in a stack
// chunk: the Method* of interpreted frames and the nmethod of compiled
// frames are passed to the wrapped OopIterateClosure.
class DoMethodsStackChunkFrameClosure {
  OopIterateClosure* _closure;

public:
  DoMethodsStackChunkFrameClosure(OopIterateClosure* cl) : _closure(cl) {}

  // Per-frame callback invoked by stackChunkOopDesc::iterate_stack().
  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    if (f.is_interpreted()) {
      Method* m = f.to_frame().interpreter_frame_method();
      _closure->do_method(m);
    } else if (f.is_compiled()) {
      nmethod* nm = f.cb()->as_nmethod();
      // The do_nmethod function takes care of having the right synchronization
      // when keeping the nmethod alive during concurrent execution.
      _closure->do_nmethod(nm);
      // There is no need to mark the Method, as class redefinition will walk the
      // CodeCache, noting their Methods
    }
    // Stub/native frames carry no metadata to visit.
    return true;
  }
};
122 
123 void InstanceStackChunkKlass::do_methods(stackChunkOop chunk, OopIterateClosure* cl) {
124   DoMethodsStackChunkFrameClosure closure(cl);
125   chunk->iterate_stack(&closure);
126 }
127 
// Frame closure that applies an OopIterateClosure to every oop of each frame
// in a stack chunk, restricted to a memory region. If the wrapped closure
// requests metadata, each frame's Method*/nmethod is visited as well.
class OopIterateStackChunkFrameClosure {
  OopIterateClosure* const _closure;
  MemRegion _bound;          // only oop locations inside this region are visited
  const bool _do_metadata;   // cached result of _closure->do_metadata()

public:
  OopIterateStackChunkFrameClosure(OopIterateClosure* closure, MemRegion mr)
    : _closure(closure),
      _bound(mr),
      _do_metadata(_closure->do_metadata()) {
    assert(_closure != nullptr, "must be set");
  }

  // Per-frame callback invoked by stackChunkOopDesc::iterate_stack().
  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    if (_do_metadata) {
      DoMethodsStackChunkFrameClosure(_closure).do_frame(f, map);
    }

    // Filter the frame's oops through the region bound before applying _closure.
    StackChunkOopIterateFilterClosure<OopIterateClosure> cl(_closure, _bound);
    f.iterate_oops(&cl, map);

    return true;
  }
};
153 
// Slow-path oop iteration over all frames of a stack chunk, restricted to
// the memory region mr. Also visits frame metadata if closure requests it.
void InstanceStackChunkKlass::oop_oop_iterate_stack_slow(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr) {
  if (UseZGC || UseShenandoahGC) {
    // An OopClosure could apply barriers to a stack chunk. The side effects
    // of the load barriers could destroy derived pointers, which must be
    // processed before their base oop is processed. So we force processing
    // of derived pointers before applying the closures.
    chunk->relativize_derived_pointers_concurrently();
  }
  OopIterateStackChunkFrameClosure frame_closure(closure, mr);
  chunk->iterate_stack(&frame_closure);
}
165 
166 template <typename OopT>
167 void InstanceStackChunkKlass::oop_oop_iterate_lockstack(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr) {
168   if (LockingMode != LM_LIGHTWEIGHT) {
169     return;
170   }
171 
172   StackChunkOopIterateFilterClosure<OopIterateClosure> cl(closure, mr);
173   if (chunk->has_bitmap()) {
174     chunk->iterate_lockstack<OopT>(&cl);
175   } else {
176     chunk->iterate_lockstack<oop>(&cl);
177   }
178 }
179 
180 template void InstanceStackChunkKlass::oop_oop_iterate_lockstack<oop>(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr);
181 template void InstanceStackChunkKlass::oop_oop_iterate_lockstack<narrowOop>(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr);
182 
#ifdef ASSERT

// Debug-only frame closure that collects an annotated, slot-by-slot
// description of every frame in a stack chunk (via frame::describe) and
// prints it together with markers for the chunk's notable addresses.
class DescribeStackChunkClosure {
  stackChunkOop _chunk;
  FrameValues _values;   // accumulates the per-slot annotations
  RegisterMap _map;      // scratch map used to expand a SmallRegisterMap
  int _frame_no;         // count of frames described so far

public:
  DescribeStackChunkClosure(stackChunkOop chunk)
    : _chunk(chunk),
      _map(nullptr,
           RegisterMap::UpdateMap::include,
           RegisterMap::ProcessFrames::skip,
           RegisterMap::WalkContinuation::include),
      _frame_no(0) {
    _map.set_include_argument_oops(false);
  }

  // frame::describe needs a full RegisterMap: pass one through unchanged,
  // but expand a SmallRegisterMap into the scratch _map first.
  const RegisterMap* get_map(const RegisterMap* map,      intptr_t* sp) { return map; }
  const RegisterMap* get_map(const SmallRegisterMap* map, intptr_t* sp) { return map->copy_to_RegisterMap(&_map, sp); }

  // Per-frame callback invoked by stackChunkOopDesc::iterate_stack().
  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& f, const RegisterMapT* map) {
    // frame::describe may need handles; permit them even under a NoHandleMark.
    ResetNoHandleMark rnhm;
    HandleMark hm(Thread::current());

    frame fr = f.to_frame();
    fr.describe(_values, _frame_no++, get_map(map, f.sp()));
    return true;
  }

  // Add markers for the chunk's boundary addresses to the description.
  void describe_chunk() {
    // _values.describe(-1, _chunk->start_address(), "CHUNK START");
    _values.describe(-1, _chunk->sp_address(),         "CHUNK SP");
    _values.describe(-1, _chunk->bottom_address() - 1, "CHUNK ARGS");
    _values.describe(-1, _chunk->end_address() - 1,    "CHUNK END");
  }

  // Print everything collected, or " EMPTY" when no frames were walked.
  void print_on(outputStream* out) {
    if (_frame_no > 0) {
      describe_chunk();
      _values.print_on(_chunk, out);
    } else {
      out->print_cr(" EMPTY");
    }
  }
};
#endif
232 
// Frame closure that prints a one-line summary, the frame itself, and its
// oop map (if any) for every frame in a stack chunk. Used by the verbose
// mode of print_chunk().
class PrintStackChunkClosure {
  outputStream* _st;

public:
  PrintStackChunkClosure(outputStream* st) : _st(st) {}

  // Per-frame callback invoked by stackChunkOopDesc::iterate_stack().
  template <ChunkFrames frame_kind, typename RegisterMapT>
  bool do_frame(const StackChunkFrameStream<frame_kind>& fs, const RegisterMapT* map) {
    frame f = fs.to_frame();
    // compiled_frame_stack_argsize() is only meaningful for compiled Java
    // frames, so interpreted and stub frames report 0.
    _st->print_cr("-- frame sp: " PTR_FORMAT " interpreted: %d size: %d argsize: %d",
                  p2i(fs.sp()), fs.is_interpreted(), f.frame_size(),
                  fs.is_interpreted() || fs.is_stub() ? 0 : f.compiled_frame_stack_argsize());
  #ifdef ASSERT
    // NOTE(review): debug builds use the short form (print_value_on) and
    // product builds the long form (print_on) — this looks inverted; confirm
    // it reflects which frame printers are compiled into each build flavor.
    f.print_value_on(_st);
  #else
    f.print_on(_st);
  #endif
    const ImmutableOopMap* oopmap = fs.oopmap();
    if (oopmap != nullptr) {
      oopmap->print_on(_st);
      _st->cr();
    }
    return true;
  }
};
258 
// Print a summary of chunk c to st: its address range, flags and stack
// pointers. With verbose=true, additionally prints every frame (and, in
// debug builds, a full slot-by-slot description). Safe to call with
// c == nullptr.
void InstanceStackChunkKlass::print_chunk(const stackChunkOop c, bool verbose, outputStream* st) {
  if (c == nullptr) {
    st->print_cr("CHUNK null");
    return;
  }

  st->print_cr("CHUNK " PTR_FORMAT " " PTR_FORMAT " - " PTR_FORMAT " :: " INTPTR_FORMAT,
               p2i(c), p2i(c->start_address()), p2i(c->end_address()), c->identity_hash());
  st->print_cr("       barriers: %d gc_mode: %d bitmap: %d parent: " PTR_FORMAT,
               c->requires_barriers(), c->is_gc_mode(), c->has_bitmap(), p2i(c->parent()));
  st->print_cr("       flags mixed: %d", c->has_mixed_frames());
  st->print_cr("       size: %d bottom: %d max_size: %d sp: %d pc: " PTR_FORMAT,
               c->stack_size(), c->bottom(), c->max_thawing_size(), c->sp(), p2i(c->pc()));

  if (verbose) {
    st->cr();
    st->print_cr("------ chunk frames end: " PTR_FORMAT, p2i(c->bottom_address()));
    PrintStackChunkClosure closure(st);
    c->iterate_stack(&closure);
    st->print_cr("------");

  #ifdef ASSERT
    // Debug builds also dump a frame-value description of the whole chunk.
    ResourceMark rm;
    DescribeStackChunkClosure describe(c);
    c->iterate_stack(&describe);
    describe.print_on(st);
    st->print_cr("======");
  #endif
  }
}
289 
// One-time initialization of the cached offset of the stack area within a
// stackChunkOop: the stack begins right after the fixed-size part of the
// instance (size_helper() is in heap words, hence the shift to bytes).
void InstanceStackChunkKlass::init_offset_of_stack() {
  // Must only be computed once (it is also serialized to the CDS archive).
  assert(_offset_of_stack == 0, "once");
  _offset_of_stack = cast(vmClasses::StackChunk_klass())->size_helper() << LogHeapWordSize;
}