/*
 * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_INSTANCESTACKCHUNKKLASS_HPP
#define SHARE_OOPS_INSTANCESTACKCHUNKKLASS_HPP

#include "classfile/vmClasses.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/stackChunkOop.hpp"
#include "runtime/handles.hpp"
#include "utilities/macros.hpp"

class frame;
class ClassFileParser;
class ImmutableOopMap;
class VMRegImpl;
typedef VMRegImpl* VMReg;
template <bool mixed = true> class StackChunkFrameStream;

// An InstanceStackChunkKlass is a specialization of InstanceKlass. Its
// instances have a header containing metadata, followed by a blob containing
// a stack segment (some integral number of stack frames).
//
// A chunk is said to be "mixed" if it contains interpreter frames or stubs
// (which can only be a safepoint stub as the topmost frame). Otherwise, it
// must contain only compiled Java frames.
//
// Interpreter frames in chunks have their internal pointers converted to
// relative offsets from sp. Derived pointers in compiled frames might also
// be converted to relative offsets from their base.
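//
// A hedged usage sketch (illustrative only, not code from this file): walking
// the frames of a chunk with the StackChunkFrameStream declared below,
// assuming `chunk` is a stackChunkOop and `map` is a RegisterMap that has
// been set up with initialize_register_map():
//
//   StackChunkFrameStream<true> fs(chunk); // <true>: the chunk may be mixed
//   for (; !fs.is_done(); fs.next(&map)) {
//     if (fs.is_interpreted()) {
//       // interpreter frame: internal pointers are sp-relative offsets
//     } else {
//       // compiled frame (or topmost safepoint stub)
//     }
//   }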

class InstanceStackChunkKlass: public InstanceKlass {
  friend class VMStructs;
  friend class InstanceKlass;
  friend class stackChunkOopDesc;
  friend class Continuations;
  template <bool mixed> friend class StackChunkFrameStream;
  friend class FixChunkIterateStackClosure;
  friend class MarkMethodsStackClosure;
  template <bool concurrent_gc, typename OopClosureType> friend class OopOopIterateStackClosure;

public:
  static const KlassID ID = InstanceStackChunkKlassID;

private:
  static int _offset_of_stack;

  InstanceStackChunkKlass(const ClassFileParser& parser);
  static inline int metadata_words(); // size, in words, of frame metadata (e.g. pc and link); same as ContinuationHelper::frame_metadata
  static inline int align_wiggle();   // size, in words, of maximum shift in frame position due to alignment; same as ContinuationHelper::align_wiggle

public:
  InstanceStackChunkKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); }

  // Casting from Klass*
  static InstanceStackChunkKlass* cast(Klass* k) {
    assert(InstanceKlass::cast(k)->is_stack_chunk_instance_klass(), "");
    return static_cast<InstanceStackChunkKlass*>(k);
  }

  inline int instance_size(int stack_size_in_words) const;
  static inline int bitmap_size(int stack_size_in_words); // in words
  // The *last* bit in the bitmap corresponds to the last word in the stack;
  // this returns the bit index corresponding to the first word.
  static inline BitMap::idx_t bit_offset(int stack_size_in_words);
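  // A hedged worked example (hypothetical numbers): with UseCompressedOops, a
  // stack of 100 words needs bitmap_size_in_bits(100) == 200 bits (a heap word
  // holds two narrow oops, hence two bits per stack word). Since the *last*
  // bit maps to the last stack word, the used bits span
  // [bit_offset(100), bit_offset(100) + 200), with any rounding-up slack from
  // bitmap_size() sitting before bit_offset(100).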

  // Returns the size of the instance including the stack data.
  virtual int oop_size(oop obj) const override;
  virtual int compact_oop_size(oop obj) const override;

  virtual size_t copy_disjoint(oop obj, HeapWord* to, size_t word_size) override { return copy<true> (obj, to, word_size); }
  virtual size_t copy_conjoint(oop obj, HeapWord* to, size_t word_size) override { return copy<false>(obj, to, word_size); }

  virtual size_t copy_disjoint_compact(oop obj, HeapWord* to) override { return copy_compact<true> (obj, to); }
  virtual size_t copy_conjoint_compact(oop obj, HeapWord* to) override { return copy_compact<false>(obj, to); }

  static void serialize_offsets(class SerializeClosure* f) NOT_CDS_RETURN;

  static void print_chunk(const stackChunkOop chunk, bool verbose, outputStream* st = tty);

  static inline void assert_mixed_correct(stackChunkOop chunk, bool mixed) PRODUCT_RETURN;
#ifndef PRODUCT
  void oop_print_on(oop obj, outputStream* st) override;
#endif

  static bool verify(oop obj, size_t* out_size = NULL, int* out_oops = NULL, int* out_frames = NULL, int* out_interpreted_frames = NULL) NOT_DEBUG({ return true; });

  // The stack segment is embedded in the chunk instance, at a fixed byte offset from the start of the object
  static HeapWord* start_of_stack(oop obj) { return (HeapWord*)(cast_from_oop<intptr_t>(obj) + offset_of_stack()); }
  static inline HeapWord* start_of_bitmap(oop obj);

  static int offset_of_stack() { return _offset_of_stack; }
  static void init_offset_of_stack() {
    // Cache the byte offset of the stack segment: it starts right after the fixed-size part of the instance
    assert(_offset_of_stack == 0, "once");
    _offset_of_stack = InstanceStackChunkKlass::cast(vmClasses::StackChunk_klass())->size_helper() << LogHeapWordSize;
  }
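
  // A hedged layout sketch (exact field sizes vary by build): the stack
  // segment sits right after the fixed-size header (size_helper() words,
  // cached in bytes in _offset_of_stack), and the oop bitmap follows it:
  //
  //   +---------------------+  <- obj
  //   | header + oop fields |
  //   +---------------------+  <- start_of_stack(obj)
  //   | stack segment       |
  //   +---------------------+  <- start_of_bitmap(obj)
  //   | oop bitmap          |
  //   +---------------------+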

  template<bool mixed = true>
  static int count_frames(stackChunkOop chunk);

  // Oop fields (and metadata) iterators
  //
  // In addition to the header's oop fields, these visit the oops in the frames of the chunk's stack segment.

  // Forward iteration
  // Iterate over the oop fields and metadata.
  template <typename T, class OopClosureType>
  inline void oop_oop_iterate(oop obj, OopClosureType* closure);

  // Reverse iteration
  // Iterate over the oop fields and metadata.
  template <typename T, class OopClosureType>
  inline void oop_oop_iterate_reverse(oop obj, OopClosureType* closure);

  // Bounded range iteration
  // Iterate over the oop fields and metadata.
  template <typename T, class OopClosureType>
  inline void oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);

public:
  template <bool store>
  static void do_barriers(stackChunkOop chunk);

  template <bool store, bool mixed, typename RegisterMapT>
  static void do_barriers(stackChunkOop chunk, const StackChunkFrameStream<mixed>& f, const RegisterMapT* map);

  template <typename RegisterMapT>
  static void fix_thawed_frame(stackChunkOop chunk, const frame& f, const RegisterMapT* map);

private:
  // Two bits per stack word with compressed oops (a word can hold two narrow oops), else one
  static int bitmap_size_in_bits(int stack_size_in_words) { return stack_size_in_words << (UseCompressedOops ? 1 : 0); }
  void build_bitmap(stackChunkOop chunk);

  template<bool disjoint> size_t copy(oop obj, HeapWord* to, size_t word_size);
  template<bool disjoint> size_t copy_compact(oop obj, HeapWord* to);

  template <typename T, class OopClosureType>
  inline void oop_oop_iterate_header(stackChunkOop chunk, OopClosureType* closure);

  template <typename T, class OopClosureType>
  inline void oop_oop_iterate_header_bounded(stackChunkOop chunk, OopClosureType* closure, MemRegion mr);

  template <bool concurrent_gc, class OopClosureType>
  inline void oop_oop_iterate_stack(stackChunkOop chunk, OopClosureType* closure);

  template <bool concurrent_gc, class OopClosureType>
  inline void oop_oop_iterate_stack_bounded(stackChunkOop chunk, OopClosureType* closure, MemRegion mr);

  template <class OopClosureType>
  inline void oop_oop_iterate_stack_helper(stackChunkOop chunk, OopClosureType* closure, intptr_t* start, intptr_t* end);

  void mark_methods(stackChunkOop chunk, OopIterateClosure* cl);

  template <bool mixed, class StackChunkFrameClosureType>
  static inline void iterate_stack(stackChunkOop obj, StackChunkFrameClosureType* closure);

  template <bool concurrent_gc>
  void oop_oop_iterate_stack_slow(stackChunkOop chunk, OopIterateClosure* closure, MemRegion mr);

  template <bool concurrent_gc, bool mixed, typename RegisterMapT>
  static void relativize_derived_pointers(const StackChunkFrameStream<mixed>& f, const RegisterMapT* map);

  template <bool mixed, typename RegisterMapT>
  static void derelativize_derived_pointers(const StackChunkFrameStream<mixed>& f, const RegisterMapT* map);

  typedef void (*MemcpyFnT)(void* from, void* to, size_t size);
  static void resolve_memcpy_functions();
  static MemcpyFnT memcpy_fn_from_stack_to_chunk;
  static MemcpyFnT memcpy_fn_from_chunk_to_stack;
  template <bool dword_aligned> inline static void copy_from_stack_to_chunk(void* from, void* to, size_t size);
  template <bool dword_aligned> inline static void copy_from_chunk_to_stack(void* from, void* to, size_t size);
  static void default_memcpy(void* from, void* to, size_t size);
};

template <bool mixed>
class StackChunkFrameStream : public StackObj {
 private:
  intptr_t* _end;
  intptr_t* _sp;
  intptr_t* _unextended_sp; // used only when mixed
  CodeBlob* _cb;
  mutable const ImmutableOopMap* _oopmap;

#ifndef PRODUCT
  stackChunkOop _chunk;
  int _index;
#endif

#ifdef ASSERT
  bool _has_stub;
#endif

public:
  StackChunkFrameStream() { NOT_PRODUCT(_chunk = nullptr; _index = -1;) DEBUG_ONLY(_has_stub = false;) }
  inline StackChunkFrameStream(stackChunkOop chunk, bool gc = false);
  inline StackChunkFrameStream(stackChunkOop chunk, const frame& f);

  bool is_done() const { return _sp >= _end; }
  bool is_last() const { return next_sp() >= _end; }

  intptr_t* end() { return _end; }
  void set_end(intptr_t* end) { _end = end; }

  // Query
  intptr_t* end() const { return _end; }

  intptr_t*        sp() const  { return _sp; }
  inline address   pc() const  { return get_pc(); }
  inline intptr_t* fp() const;
  inline intptr_t* unextended_sp() const { return mixed ? _unextended_sp : _sp; }
  NOT_PRODUCT(int index() { return _index; })
  inline address orig_pc() const;

  inline bool is_interpreted() const;
  inline bool is_stub() const;
  inline bool is_compiled() const;
  CodeBlob* cb() const { return _cb; }
  const ImmutableOopMap* oopmap() const { if (_oopmap == NULL) get_oopmap(); return _oopmap; }
  inline int frame_size() const;
  inline int stack_argsize() const;
  inline int num_oops() const;

  inline void initialize_register_map(RegisterMap* map);
  template <typename RegisterMapT> inline void next(RegisterMapT* map);

  template <typename RegisterMapT> inline void update_reg_map(RegisterMapT* map);

  void handle_deopted() const;

  inline int to_offset(stackChunkOop chunk) const { assert (!is_done(), ""); return _sp - chunk->start_address(); }

  inline frame to_frame() const;

#ifdef ASSERT
  bool is_in_frame(void* p) const;
  bool is_deoptimized() const;
  template <typename RegisterMapT> bool is_in_oops(void* p, const RegisterMapT* map) const;
#endif

  void print_on(outputStream* st) const PRODUCT_RETURN;

 private:
  inline address get_pc() const;
  inline void get_cb();

  inline intptr_t* next_sp() const;
  inline int interpreter_frame_size() const;
  inline int interpreter_frame_num_oops() const;
  inline int interpreter_frame_stack_argsize() const;
  inline void next_for_interpreter_frame();
  inline intptr_t* next_sp_for_interpreter_frame() const;
  inline intptr_t* unextended_sp_for_interpreter_frame() const;
  inline intptr_t* derelativize(int offset) const;
  inline void get_oopmap() const;
  inline void get_oopmap(address pc, int oopmap_slot) const;
  static inline int get_initial_sp(stackChunkOop chunk, bool gc);

  template <typename RegisterMapT> inline void update_reg_map_pd(RegisterMapT* map);

  template <typename RegisterMapT>
  inline void* reg_to_loc(VMReg reg, const RegisterMapT* map) const;

public:
  template <class OopClosureType, class RegisterMapT> inline void iterate_oops(OopClosureType* closure, const RegisterMapT* map) const;
  template <class DerivedOopClosureType, class RegisterMapT> inline void iterate_derived_pointers(DerivedOopClosureType* closure, const RegisterMapT* map) const;
};
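
// A hedged example of per-frame oop iteration (illustrative only): assuming
// `cl` is an instance of a suitable OopClosureType and `map` is a RegisterMap
// set up with initialize_register_map(), visit the oops recorded in each
// frame's oop map:
//
//   for (StackChunkFrameStream<true> fs(chunk); !fs.is_done(); fs.next(&map)) {
//     fs.iterate_oops(&cl, &map);
//   }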

#endif // SHARE_OOPS_INSTANCESTACKCHUNKKLASS_HPP