/*
 * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CDS_ARCHIVEBUILDER_HPP
#define SHARE_CDS_ARCHIVEBUILDER_HPP

#include "cds/archiveUtils.hpp"
#include "cds/dumpAllocStats.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/array.hpp"
#include "oops/klass.hpp"
#include "runtime/os.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/resizeableResourceHash.hpp"
#include "utilities/resourceHash.hpp"

class ArchiveHeapInfo;
class CHeapBitMap;
class FileMapInfo;
class Klass;
class MemRegion;
class Symbol;

// The minimum alignment for non-Klass objects inside the CDS archive. Klass objects need
// to follow CompressedKlassPointers::klass_alignment_in_bytes().
constexpr size_t SharedSpaceObjectAlignment = Metaspace::min_allocation_alignment_bytes;

// Overview of CDS archive creation (for both static and dynamic dump):
//
// [1] Load all classes (static dump: from the classlist, dynamic dump: as part of app execution)
// [2] Allocate "output buffer"
// [3] Copy contents of the 2 "core" regions (rw/ro) into the output buffer.
//       - allocate the cpp vtables in rw (static dump only)
//       - memcpy the MetaspaceObjs into rw/ro:
//         dump_rw_region();
//         dump_ro_region();
//       - fix all the pointers in the MetaspaceObjs to point to the copies
//         relocate_metaspaceobj_embedded_pointers()
// [4] Copy symbol table, dictionary, etc, into the ro region
// [5] Relocate all the pointers in rw/ro, so that the archive can be mapped to
//     the "requested" location without runtime relocation. See relocate_to_requested()
//
 66 // "source" vs "buffered" vs "requested"
 67 //
 68 // The ArchiveBuilder deals with three types of addresses.
 69 //
 70 // "source":    These are the addresses of objects created in step [1] above. They are the actual
 71 //              InstanceKlass*, Method*, etc, of the Java classes that are loaded for executing
 72 //              Java bytecodes in the JVM process that's dumping the CDS archive.
 73 //
 74 //              It may be necessary to contiue Java execution after ArchiveBuilder is finished.
 75 //              Therefore, we don't modify any of the "source" objects.
 76 //
 77 // "buffered":  The "source" objects that are deemed archivable are copied into a temporary buffer.
 78 //              Objects in the buffer are modified in steps [2, 3, 4] (e.g., unshareable info is
 79 //              removed, pointers are relocated, etc) to prepare them to be loaded at runtime.
 80 //
 81 // "requested": These are the addreses where the "buffered" objects should be loaded at runtime.
 82 //              When the "buffered" objects are written into the archive file, their addresses
 83 //              are adjusted in step [5] such that the lowest of these objects would be mapped
 84 //              at SharedBaseAddress.
 85 //
 86 // Translation between "source" and "buffered" addresses is done with two hashtables:
 87 //     _src_obj_table          : "source"   -> "buffered"
 88 //     _buffered_to_src_table  : "buffered" -> "source"
 89 //
 90 // Translation between "buffered" and "requested" addresses is done with a simple shift:
 91 //    buffered_address + _buffer_to_requested_delta == requested_address
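//
// For example (all addresses are hypothetical, chosen only to illustrate the arithmetic):
//    buffer_bottom()                  == 0x00007f0123400000  (wherever the output buffer happens to be)
//    _requested_static_archive_bottom == 0x0000000800000000  (SharedBaseAddress at dump time)
//    _buffer_to_requested_delta       == 0x0000000800000000 - 0x00007f0123400000
// so an object at buffered address 0x00007f0123400040 is written into the archive as if it
// lived at requested address 0x0000000800000040.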
//
class ArchiveBuilder : public StackObj {
protected:
  DumpRegion* _current_dump_region;
  address _buffer_bottom;                      // for writing the contents of rw/ro regions
  address _last_verified_top;
  int _num_dump_regions_used;
  size_t _other_region_used_bytes;

  // These are the addresses where we will request the static and dynamic archives to be
  // mapped at run time. If the request fails (due to ASLR), we will map the archives at
  // os-selected addresses.
  address _requested_static_archive_bottom;     // This is determined solely by the value of
                                                // SharedBaseAddress during -Xshare:dump.
  address _requested_static_archive_top;
  address _requested_dynamic_archive_bottom;    // Used only during dynamic dump. It's placed
                                                // immediately above _requested_static_archive_top.
  address _requested_dynamic_archive_top;

  // (Used only during dynamic dump) where the static archive is actually mapped. This
  // may be different from _requested_static_archive_{bottom,top} due to ASLR.
  address _mapped_static_archive_bottom;
  address _mapped_static_archive_top;

  intx _buffer_to_requested_delta;

  DumpRegion* current_dump_region() const {  return _current_dump_region;  }

public:
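  // How a pointer stored inside an archived object is treated, depending on what it
  // points to. (This summary is inferred from SourceObjInfo and get_follow_mode() below.)
  //   make_a_copy:  the pointee itself is copied into the buffer; the pointer will be
  //                 relocated to refer to the "buffered" copy.
  //   point_to_it:  the pointer keeps referring to the original "source" object (the
  //                 buffered_addr of the pointee is its source address).
  //   set_to_null:  the pointer is set to null in the archived copy.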
  enum FollowMode {
    make_a_copy, point_to_it, set_to_null
  };

private:
  class SourceObjInfo {
    uintx _ptrmap_start;     // The bit-offset of the start of this object (inclusive)
    uintx _ptrmap_end;       // The bit-offset of the end   of this object (exclusive)
    bool _read_only;
    bool _has_embedded_pointer;
    FollowMode _follow_mode;
    int _size_in_bytes;
    int _id; // Each object has a unique serial ID, starting from zero. The ID is assigned
             // when the object is added into _source_objs.
    MetaspaceObj::Type _msotype;
    address _source_addr;    // The source object to be copied.
    address _buffered_addr;  // The copy of this object inside the buffer.
  public:
    SourceObjInfo(MetaspaceClosure::Ref* ref, bool read_only, FollowMode follow_mode) :
      _ptrmap_start(0), _ptrmap_end(0), _read_only(read_only), _has_embedded_pointer(false), _follow_mode(follow_mode),
      _size_in_bytes(ref->size() * BytesPerWord), _id(0), _msotype(ref->msotype()),
      _source_addr(ref->obj()) {
      if (follow_mode == point_to_it) {
        _buffered_addr = ref->obj();
      } else {
        _buffered_addr = nullptr;
      }
    }
    SourceObjInfo(address src, address buf) {
      _source_addr = src;
      _buffered_addr = buf;
    }

    // This constructor is only used for regenerated objects (created by LambdaFormInvokers, etc).
    //   src = address of a Method or InstanceKlass that has been regenerated.
    //   regenerated_obj_info = info for the regenerated version of src.
    SourceObjInfo(address src, SourceObjInfo* regenerated_obj_info) :
      _ptrmap_start(0), _ptrmap_end(0), _read_only(false), _has_embedded_pointer(false),
      _follow_mode(regenerated_obj_info->_follow_mode),
      _size_in_bytes(0), _id(0), _msotype(regenerated_obj_info->_msotype),
      _source_addr(src),  _buffered_addr(regenerated_obj_info->_buffered_addr) {}
    bool should_copy() const { return _follow_mode == make_a_copy; }
    void set_buffered_addr(address addr)  {
      assert(should_copy(), "must be");
      assert(_buffered_addr == nullptr, "cannot be copied twice");
      assert(addr != nullptr, "must be a valid copy");
      _buffered_addr = addr;
    }
    void set_ptrmap_start(uintx v) { _ptrmap_start = v;    }
    void set_ptrmap_end(uintx v)   { _ptrmap_end = v;      }
    uintx ptrmap_start()  const    { return _ptrmap_start; } // inclusive
    uintx ptrmap_end()    const    { return _ptrmap_end;   } // exclusive
    bool read_only()      const    { return _read_only;    }
    bool has_embedded_pointer() const { return _has_embedded_pointer; }
    void set_has_embedded_pointer()   { _has_embedded_pointer = true; }
    int size_in_bytes()   const    { return _size_in_bytes; }
    int id()              const    { return _id; }
    void set_id(int i)             { _id = i; }
    address source_addr() const    { return _source_addr; }
    address buffered_addr() const  {
      if (_follow_mode != set_to_null) {
        assert(_buffered_addr != nullptr, "must be initialized");
      }
      return _buffered_addr;
    }
    MetaspaceObj::Type msotype() const { return _msotype; }
  };

  class SourceObjList {
    uintx _total_bytes;
    GrowableArray<SourceObjInfo*>* _objs;     // Source objects to be archived
    CHeapBitMap _ptrmap;                      // Marks the addresses of the pointer fields
                                              // in the source objects
  public:
    SourceObjList();
    ~SourceObjList();

    GrowableArray<SourceObjInfo*>* objs() const { return _objs; }

    void append(SourceObjInfo* src_info);
    void remember_embedded_pointer(SourceObjInfo* pointing_obj, MetaspaceClosure::Ref* ref);
    void relocate(int i, ArchiveBuilder* builder);

    // convenience accessor
    SourceObjInfo* at(int i) const { return objs()->at(i); }
  };

  class CDSMapLogger;

  static const int INITIAL_TABLE_SIZE = 15889;
  static const int MAX_TABLE_SIZE     = 1000000;

  ReservedSpace _shared_rs;
  VirtualSpace _shared_vs;

  DumpRegion _rw_region;
  DumpRegion _ro_region;
  DumpRegion _cc_region;

  // Combined bitmap to track pointers in both RW and RO regions. This is updated
  // as objects are copied into RW and RO.
  CHeapBitMap _ptrmap;

  // _ptrmap is split into these per-region bitmaps, which are written into the archive.
  CHeapBitMap _rw_ptrmap;   // marks pointers in the RW region
  CHeapBitMap _ro_ptrmap;   // marks pointers in the RO region
  CHeapBitMap _cc_ptrmap;   // marks pointers in the CC region

  SourceObjList _rw_src_objs;                 // objs to put in rw region
  SourceObjList _ro_src_objs;                 // objs to put in ro region
  ResizeableResourceHashtable<address, SourceObjInfo, AnyObj::C_HEAP, mtClassShared> _src_obj_table;
  ResizeableResourceHashtable<address, address, AnyObj::C_HEAP, mtClassShared> _buffered_to_src_table;
  GrowableArray<Klass*>* _klasses;
  GrowableArray<Symbol*>* _symbols;
  unsigned int _entropy_seed;

  // statistics
  DumpAllocStats _alloc_stats;
  size_t _total_heap_region_size;

  void print_region_stats(FileMapInfo *map_info, ArchiveHeapInfo* heap_info);
  void print_bitmap_region_stats(size_t size, size_t total_size);
  void print_heap_region_stats(ArchiveHeapInfo* heap_info, size_t total_size);

  // For global access.
  static ArchiveBuilder* _current;

public:
  // Use this when you allocate space outside of ArchiveBuilder::dump_{rw,ro}_region.
  // These are usually for misc tables that are allocated in the RO space.
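  //
  // A usage sketch (illustrative; "nbytes" stands for the size of the table):
  //   ArchiveBuilder::OtherROAllocMark mark;              // remembers the current RO top
  //   char* p = ArchiveBuilder::ro_region_alloc(nbytes);
  //   ... fill in the table at p ...
  //   // ~OtherROAllocMark() accounts for the RO bytes allocated since the mark.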
  class OtherROAllocMark {
    char* _oldtop;
  public:
    OtherROAllocMark() {
      _oldtop = _current->_ro_region.top();
    }
    ~OtherROAllocMark();
  };

private:
  FollowMode get_follow_mode(MetaspaceClosure::Ref *ref);

  void iterate_sorted_roots(MetaspaceClosure* it);
  void sort_klasses();
  static int compare_symbols_by_address(Symbol** a, Symbol** b);
  static int compare_klass_by_name(Klass** a, Klass** b);
  void update_hidden_class_loader_type(InstanceKlass* ik) NOT_CDS_JAVA_HEAP_RETURN;

  void make_shallow_copies(DumpRegion *dump_region, const SourceObjList* src_objs);
  void make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* src_info);

  void relocate_embedded_pointers(SourceObjList* src_objs);

  bool is_excluded(Klass* k);
  void clean_up_src_obj_table();

protected:
  virtual void iterate_roots(MetaspaceClosure* it) = 0;

  // Conservative estimate for number of bytes needed for:
  size_t _estimated_metaspaceobj_bytes;   // all archived MetaspaceObj's.
  size_t _estimated_hashtable_bytes;      // symbol table and dictionaries

  static const int _total_dump_regions = 2;

  size_t estimate_archive_size();

  void start_dump_region(DumpRegion* next);
  void verify_estimate_size(size_t estimate, const char* which);

public:
  address reserve_buffer();

  address buffer_bottom()                    const { return _buffer_bottom;                        }
  address buffer_top()                       const { return (address)current_dump_region()->top(); }
  address requested_static_archive_bottom()  const { return  _requested_static_archive_bottom;     }
  address mapped_static_archive_bottom()     const { return  _mapped_static_archive_bottom;        }
  intx buffer_to_requested_delta()           const { return _buffer_to_requested_delta;            }

  bool is_in_buffer_space(address p) const {
    return (buffer_bottom() != nullptr && buffer_bottom() <= p && p < buffer_top());
  }

  template <typename T> bool is_in_requested_static_archive(T p) const {
    return _requested_static_archive_bottom <= (address)p && (address)p < _requested_static_archive_top;
  }

  template <typename T> bool is_in_mapped_static_archive(T p) const {
    return _mapped_static_archive_bottom <= (address)p && (address)p < _mapped_static_archive_top;
  }

  template <typename T> bool is_in_buffer_space(T obj) const {
    return is_in_buffer_space(address(obj));
  }

  template <typename T> T to_requested(T obj) const {
    assert(is_in_buffer_space(obj), "must be");
    return (T)(address(obj) + _buffer_to_requested_delta);
  }

  static intx get_buffer_to_requested_delta() {
    return current()->buffer_to_requested_delta();
  }

  inline static u4 to_offset_u4(uintx offset) {
    guarantee(offset <= MAX_SHARED_DELTA, "must be 32-bit offset " INTPTR_FORMAT, offset);
    return (u4)offset;
  }

public:
  static const uintx MAX_SHARED_DELTA = ArchiveUtils::MAX_SHARED_DELTA;

  // The address p points to an object inside the output buffer. When the archive is mapped
  // at the requested address, what's the offset of this object from _requested_static_archive_bottom?
  uintx buffer_to_offset(address p) const;

  // Same as buffer_to_offset, except that the address p points to either (a) an object
  // inside the output buffer, or (b) an object in the currently mapped static archive.
  uintx any_to_offset(address p) const;

  // The reverse of buffer_to_offset()
  address offset_to_buffered_address(u4 offset) const;

  template <typename T>
  u4 buffer_to_offset_u4(T p) const {
    uintx offset = buffer_to_offset((address)p);
    return to_offset_u4(offset);
  }

  template <typename T>
  u4 any_to_offset_u4(T p) const {
    uintx offset = any_to_offset((address)p);
    return to_offset_u4(offset);
  }

  template <typename T>
  T offset_to_buffered(u4 offset) const {
    return (T)offset_to_buffered_address(offset);
  }
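
  // For example (an illustrative sketch; "sym" stands for any pointer into the output buffer):
  //   u4 off = builder->buffer_to_offset_u4(sym);           // archive-relative offset, fits in u4
  //   Symbol* again = builder->offset_to_buffered<Symbol*>(off);
  //   assert(again == sym, "round trip");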

public:
  ArchiveBuilder();
  ~ArchiveBuilder();

  int entropy();
  void gather_klasses_and_symbols();
  void gather_source_objs();
  bool gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool read_only);
  bool gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read_only);
  void remember_embedded_pointer_in_enclosing_obj(MetaspaceClosure::Ref* ref);
  static void serialize_dynamic_archivable_items(SerializeClosure* soc);

  DumpRegion* rw_region() { return &_rw_region; }
  DumpRegion* ro_region() { return &_ro_region; }
  DumpRegion* cc_region() { return &_cc_region; }

  void start_cc_region();
  void end_cc_region();

  static char* rw_region_alloc(size_t num_bytes) {
    return current()->rw_region()->allocate(num_bytes);
  }
  static char* ro_region_alloc(size_t num_bytes) {
    return current()->ro_region()->allocate(num_bytes);
  }
  static char* cc_region_alloc(size_t num_bytes) {
    return current()->cc_region()->allocate(num_bytes);
  }

  template <typename T>
  static Array<T>* new_ro_array(int length) {
    size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
    Array<T>* array = (Array<T>*)ro_region_alloc(byte_size);
    array->initialize(length);
    return array;
  }

  template <typename T>
  static Array<T>* new_rw_array(int length) {
    size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
    Array<T>* array = (Array<T>*)rw_region_alloc(byte_size);
    array->initialize(length);
    return array;
  }

  template <typename T>
  static size_t ro_array_bytesize(int length) {
    size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
    return align_up(byte_size, SharedSpaceObjectAlignment);
  }
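
  // For example (illustrative; valid only while an ArchiveBuilder is active): allocate a
  // read-only Array<int> with 3 elements in the RO region and fill it in:
  //   Array<int>* a = ArchiveBuilder::new_ro_array<int>(3);
  //   a->at_put(0, 10);
  //   a->at_put(1, 20);
  //   a->at_put(2, 30);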

  char* ro_strdup(const char* s);

  static int compare_src_objs(SourceObjInfo** a, SourceObjInfo** b);
  void sort_metadata_objs();
  void dump_rw_metadata();
  void dump_ro_metadata();
  void relocate_metaspaceobj_embedded_pointers();
  void record_regenerated_object(address orig_src_obj, address regen_src_obj);
  void make_klasses_shareable();
  void make_training_data_shareable();
  void relocate_to_requested();
  void write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_info);
  void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region,
                    bool read_only,  bool allow_exec);

  void write_pointer_in_buffer(address* ptr_location, address src_addr);
  template <typename T> void write_pointer_in_buffer(T* ptr_location, T src_addr) {
    write_pointer_in_buffer((address*)ptr_location, (address)src_addr);
  }

  void mark_and_relocate_to_buffered_addr(address* ptr_location);
  template <typename T> void mark_and_relocate_to_buffered_addr(T ptr_location) {
    mark_and_relocate_to_buffered_addr((address*)ptr_location);
  }

  bool has_been_archived(address src_addr) const;

  bool has_been_buffered(address src_addr) const;
  template <typename T> bool has_been_buffered(T src_addr) const {
    return has_been_buffered((address)src_addr);
  }
  address get_buffered_addr(address src_addr) const;
  template <typename T> T get_buffered_addr(T src_addr) const {
    CDS_ONLY(return (T)get_buffered_addr((address)src_addr);)
    NOT_CDS(return nullptr;)
  }

  address get_source_addr(address buffered_addr) const;
  template <typename T> T get_source_addr(T buffered_addr) const {
    return (T)get_source_addr((address)buffered_addr);
  }
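
  // These two lookups are inverses of each other, backed by _src_obj_table and
  // _buffered_to_src_table (an illustrative sketch):
  //   address buffered = builder->get_buffered_addr(src);       // "source"   -> "buffered"
  //   address src_back = builder->get_source_addr(buffered);    // "buffered" -> "source"
  //   assert(src_back == src, "round trip");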

  // All klasses and symbols that will be copied into the archive
  GrowableArray<Klass*>*  klasses() const { return _klasses; }
  GrowableArray<Symbol*>* symbols() const { return _symbols; }

  static bool is_active() {
    CDS_ONLY(return (_current != nullptr));
    NOT_CDS(return false;)
  }

  static ArchiveBuilder* current() {
    assert(_current != nullptr, "ArchiveBuilder must be active");
    return _current;
  }

  static DumpAllocStats* alloc_stats() {
    return &(current()->_alloc_stats);
  }

  static CompactHashtableStats* symbol_stats() {
    return alloc_stats()->symbol_stats();
  }

  static CompactHashtableStats* string_stats() {
    return alloc_stats()->string_stats();
  }

  narrowKlass get_requested_narrow_klass(Klass* k);

  static Klass* get_buffered_klass(Klass* src_klass) {
    Klass* klass = (Klass*)current()->get_buffered_addr((address)src_klass);
    assert(klass != nullptr && klass->is_klass(), "must be");
    return klass;
  }

  static Symbol* get_buffered_symbol(Symbol* src_symbol) {
    return (Symbol*)current()->get_buffered_addr((address)src_symbol);
  }

  void print_stats();
  void report_out_of_space(const char* name, size_t needed_bytes);

#ifdef _LP64
  // The CDS archive contains pre-computed narrow Klass IDs. It carries them in the headers of
  // archived heap objects. With +UseCompactObjectHeaders, it also carries them in prototypes
  // in Klass.
  //
  // When generating the archive, these narrow Klass IDs are computed using the following scheme:
  // 1) The future encoding base is assumed to point to the first address of the generated mapping.
  //    That means that at runtime, the narrow Klass encoding must be set up with the base pointing
  //    to the start address of the mapped CDS metadata archive (wherever that may be). This
  //    precludes zero-based encoding.
  // 2) The shift must be large enough to result in an encoding range that covers the future assumed
  //    runtime Klass range. That future Klass range will contain both the CDS metadata archive and
  //    the future runtime class space. Since we do not know the size of the future class space, we
  //    need to choose an encoding base/shift combination that will result in a "large enough" size.
  //    The details depend on whether we use compact object headers or legacy object headers:
  //    - Legacy Mode: a narrow Klass ID is 32 bit. This gives us an encoding range size of 4G even
  //      with shift = 0, which is all we need. Therefore, we use shift = 0 for pre-computing the
  //      narrow Klass IDs.
  //    - TinyClassPointer Mode: we use the highest possible shift value to maximize the encoding
  //      range size.
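  //
  // A sketch of the scheme described above (names here are illustrative, not actual code):
  //   base  = the first address of the generated mapping (at runtime: the start address of
  //           the mapped CDS metadata archive)
  //   shift = precomputed_narrow_klass_shift()
  //   narrow_klass_id(K) = (requested_address_of(K) - base) >> shift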
  static int precomputed_narrow_klass_shift();
#endif // _LP64

};

#endif // SHARE_CDS_ARCHIVEBUILDER_HPP