/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CDS_ARCHIVEHEAPWRITER_HPP
#define SHARE_CDS_ARCHIVEHEAPWRITER_HPP

#include "cds/heapShared.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopHandle.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/resourceHash.hpp"

class MemRegion;

class ArchiveHeapInfo {
  MemRegion _buffer_region;             // Contains the archived objects to be written into the CDS archive.
  CHeapBitMap _oopmap;
  CHeapBitMap _ptrmap;
  size_t _heap_roots_offset;            // Offset of the HeapShared::roots() object, from the bottom
                                        // of the archived heap objects, in bytes.

public:
  ArchiveHeapInfo() : _buffer_region(), _oopmap(128, mtClassShared), _ptrmap(128, mtClassShared) {}
  bool is_used() { return !_buffer_region.is_empty(); }

  MemRegion buffer_region() { return _buffer_region; }
  void set_buffer_region(MemRegion r) { _buffer_region = r; }

  char* buffer_start() { return (char*)_buffer_region.start(); }
  size_t buffer_byte_size() { return _buffer_region.byte_size(); }

  CHeapBitMap* oopmap() { return &_oopmap; }
  CHeapBitMap* ptrmap() { return &_ptrmap; }

  void set_heap_roots_offset(size_t n) { _heap_roots_offset = n; }
  size_t heap_roots_offset() const { return _heap_roots_offset; }
};
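
// Illustrative use (a sketch based only on the declarations in this file; the real caller
// lives in the CDS dump-time code, e.g. heapShared.cpp, and "copied_roots" is a placeholder
// name):
//
//   ArchiveHeapInfo info;
//   ArchiveHeapWriter::write(copied_roots /* GrowableArrayCHeap<oop, mtClassShared>* */, &info);
//   if (info.is_used()) {
//     char*  start = info.buffer_start();      // bytes to be stored in the archive
//     size_t size  = info.buffer_byte_size();
//     // info.oopmap() / info.ptrmap() mark the oop and native-pointer fields that need
//     // relocation at run time; info.heap_roots_offset() locates HeapShared::roots().
//   }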

#if INCLUDE_CDS_JAVA_HEAP
class ArchiveHeapWriter : AllStatic {
  // ArchiveHeapWriter manipulates three types of addresses:
  //
  //     "source" vs "buffered" vs "requested"
  //
  // (Note: the design and convention is the same as for the archiving of Metaspace objects.
  //  See archiveBuilder.hpp.)
  //
  // - "source objects" are regular Java objects allocated during the execution
  //   of "java -Xshare:dump". They can be used as regular oops.
  //
  //   HeapShared::archive_objects() recursively searches for the oops that need to be
  //   stored into the CDS archive. These are entered into HeapShared::archived_object_cache().
  //
  // - "buffered objects" are copies of the "source objects", and are stored into
  //   ArchiveHeapWriter::_buffer, which is a GrowableArray that sits outside of
  //   the valid heap range. Therefore, we avoid using the addresses of these copies
  //   as oops. They are usually called "buffered_addr" in the code (of type "address").
  //
  //   The buffered objects are stored contiguously, possibly with interleaving fillers
  //   to make sure that no object spans across a MIN_GC_REGION_ALIGNMENT boundary.
  //
  // - Each archived object has a "requested address" -- at run time, if the object
  //   can be mapped at this address, we can avoid relocation.
  //
  // The requested address is implemented differently depending on UseCompressedOops:
  //
  // UseCompressedOops == true:
  //   The archived objects are stored assuming that the runtime COOPS compression
  //   scheme is exactly the same as at dump time (or else a more expensive runtime
  //   relocation would be needed).
  //
  //   At dump time, we assume that the runtime heap range will be exactly the same as
  //   the dump-time heap range. The requested addresses of the archived objects are chosen
  //   such that they would occupy the top end of a G1 heap (TBD when dumping is supported
  //   by other collectors. See JDK-8298614).
  //
  // UseCompressedOops == false:
  //   At runtime, the heap range is usually picked (randomly) by the OS, so we will almost
  //   always need to perform relocation. Hence, the goal of the "requested address" is to
  //   ensure that the contents of the archived objects are deterministic, i.e., the oop
  //   fields of archived objects will always point to deterministic addresses.
  //
  //   For G1, the archived heap is written such that the lowest archived object is placed
  //   at NOCOOPS_REQUESTED_BASE. (TBD after JDK-8298614.)
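  //
  // Putting the three together, an illustrative sketch (for orientation only; the
  // authoritative translations are the helpers declared below and defined in
  // archiveHeapWriter.cpp):
  //
  //   oop src = ...;                                   // "source": a live object in the dump-time heap
  //   address buf = ...;                               // "buffered": its copy inside _buffer (not a valid oop)
  //   oop req = source_obj_to_requested_obj(src);      // "requested": where we hope to map it at run time
  //   assert(buffered_addr_to_source_obj(buf) == src, "round trip");
  //   assert(buffered_addr_to_requested_addr(buf) == cast_from_oop<address>(req), "consistent views");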
  // ----------------------------------------------------------------------

public:
  static const intptr_t NOCOOPS_REQUESTED_BASE = 0x10000000;

private:
  class EmbeddedOopRelocator;
  struct NativePointerInfo {
    oop _src_obj;
    int _field_offset;
  };

  // The minimum region size of all collectors that are supported by CDS in
  // ArchiveHeapLoader::can_map() mode. Currently only G1 is supported. G1's region size
  // depends on -Xmx, but can never be smaller than 1 * M.
  // (TODO: Perhaps change to 256K to be compatible with Shenandoah)
  static constexpr int MIN_GC_REGION_ALIGNMENT = 1 * M;

  static GrowableArrayCHeap<u1, mtClassShared>* _buffer;

  // The number of bytes that have been written into _buffer (may be smaller than _buffer->length()).
  static size_t _buffer_used;

  // The bottom of the copy of HeapShared::roots() inside this->_buffer.
  static size_t _heap_roots_offset;
  static size_t _heap_roots_word_size;

  // The address range of the requested location of the archived heap objects.
  static address _requested_bottom;
  static address _requested_top;

  static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
  static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;

  // We sort _source_objs_order to minimize the number of bits in ptrmap and oopmap.
  // See comments near the body of ArchiveHeapWriter::compare_objs_by_oop_fields().
  // The objects will be written in the order of:
  //     _source_objs->at(_source_objs_order->at(0)._index)
  //     _source_objs->at(_source_objs_order->at(1)._index)
  //     _source_objs->at(_source_objs_order->at(2)._index)
  //     ...
  // (See also the iteration sketch below.)
  struct HeapObjOrder {
    int _index;    // The location of this object in _source_objs
    int _rank;     // A lower rank means the object will be written at a lower location.
  };
  static GrowableArrayCHeap<HeapObjOrder, mtClassShared>* _source_objs_order;
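  //
  // Iteration sketch (illustrative only; the actual loop is in archiveHeapWriter.cpp):
  //
  //   for (int i = 0; i < _source_objs_order->length(); i++) {
  //     oop src_obj = _source_objs->at(_source_objs_order->at(i)._index);
  //     copy_one_source_obj_to_buffer(src_obj);   // lower rank => lower buffer address
  //   }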

  typedef ResizeableResourceHashtable<size_t, oop,
      AnyObj::C_HEAP,
      mtClassShared> BufferOffsetToSourceObjectTable;
  static BufferOffsetToSourceObjectTable* _buffer_offset_to_source_obj_table;

  static void allocate_buffer();
  static void ensure_buffer_space(size_t min_bytes);

  // Both Java byte arrays and GrowableArray use int indices and lengths. Do a safe typecast with a range check.
  static int to_array_index(size_t i) {
    assert(i <= (size_t)max_jint, "must be");
    return (int)i;
  }
  static int to_array_length(size_t n) {
    return to_array_index(n);
  }

  template <typename T> static T offset_to_buffered_address(size_t offset) {
    return (T)(_buffer->adr_at(to_array_index(offset)));
  }

  static address buffer_bottom() {
    return offset_to_buffered_address<address>(0);
  }

  // The exclusive end of the last object that was copied into the buffer.
  static address buffer_top() {
    return buffer_bottom() + _buffer_used;
  }

  static bool in_buffer(address buffered_addr) {
    return (buffer_bottom() <= buffered_addr) && (buffered_addr < buffer_top());
  }

  static size_t buffered_address_to_offset(address buffered_addr) {
    assert(in_buffer(buffered_addr), "sanity");
    return buffered_addr - buffer_bottom();
  }
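  //
  // Note (follows directly from the two helpers above): for any buffered_addr that
  // satisfies in_buffer(buffered_addr),
  //   offset_to_buffered_address<address>(buffered_address_to_offset(buffered_addr)) == buffered_addr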

  static void copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
  static void copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
  static size_t copy_one_source_obj_to_buffer(oop src_obj);

  static void maybe_fill_gc_region_gap(size_t required_byte_size);
  static size_t filler_array_byte_size(int length);
  static int filler_array_length(size_t fill_bytes);
  static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);
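  //
  // Filler sketch (an assumption about the flow; the authoritative logic is
  // maybe_fill_gc_region_gap() in archiveHeapWriter.cpp): before an object of
  // required_byte_size is copied, if it would straddle a MIN_GC_REGION_ALIGNMENT
  // boundary, a filler array is emitted first so that the object starts in the next
  // aligned chunk, roughly:
  //
  //   if (_buffer_used / MIN_GC_REGION_ALIGNMENT !=
  //       (_buffer_used + required_byte_size) / MIN_GC_REGION_ALIGNMENT) {
  //     size_t fill_bytes = align_up(_buffer_used, MIN_GC_REGION_ALIGNMENT) - _buffer_used;
  //     init_filler_array_at_buffer_top(filler_array_length(fill_bytes), fill_bytes);
  //   }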

  static void set_requested_address(ArchiveHeapInfo* info);
  static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots, ArchiveHeapInfo* info);
  static void compute_ptrmap(ArchiveHeapInfo* info);
  static bool is_in_requested_range(oop o);
  static oop requested_obj_from_buffer_offset(size_t offset);

  static oop load_oop_from_buffer(oop* buffered_addr);
  static oop load_oop_from_buffer(narrowOop* buffered_addr);
  inline static void store_oop_in_buffer(oop* buffered_addr, oop requested_obj);
  inline static void store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj);

  template <typename T> static oop load_source_oop_from_buffer(T* buffered_addr);
  template <typename T> static void store_requested_oop_in_buffer(T* buffered_addr, oop request_oop);

  template <typename T> static T* requested_addr_to_buffered_addr(T* p);
  template <typename T> static void relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap);
  template <typename T> static void mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap);
  template <typename T> static void relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap);

  static void update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass);

  static int compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b);
  static void sort_source_objs();

public:
  static void init() NOT_CDS_JAVA_HEAP_RETURN;
  static void add_source_obj(oop src_obj);
  static bool is_too_large_to_archive(size_t size);
  static bool is_too_large_to_archive(oop obj);
  static bool is_string_too_large_to_archive(oop string);
  static void write(GrowableArrayCHeap<oop, mtClassShared>*, ArchiveHeapInfo* heap_info);
  static address requested_address();  // requested address of the lowest archived heap object
  static oop heap_roots_requested_address(); // requested address of HeapShared::roots()
  static address buffered_heap_roots_addr() {
    return offset_to_buffered_address<address>(_heap_roots_offset);
  }
  static size_t heap_roots_word_size() {
    return _heap_roots_word_size;
  }
  static size_t get_filler_size_at(address buffered_addr);

  static void mark_native_pointer(oop src_obj, int offset);
  static bool is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset);
  static oop source_obj_to_requested_obj(oop src_obj);
  static oop buffered_addr_to_source_obj(address buffered_addr);
  static address buffered_addr_to_requested_addr(address buffered_addr);

  // Archived heap object headers carry pre-computed narrow Klass ids calculated with the
  // following scheme:
  // 1) the encoding base must be the mapping start address.
  // 2) shift must be large enough to result in an encoding range that covers the runtime Klass range.
  //    That Klass range is defined by CDS archive size and runtime class space size. Luckily, the maximum
  //    size can be predicted: archive size is assumed to be <1G, class space size capped at 3G, and at
  //    runtime we put both regions adjacent to each other. Therefore, runtime Klass range size < 4G.
  //    Since nKlass itself is 32-bit, our encoding range length is 4G, and since we set the base directly
  //    at the mapping start, these 4G are enough. Therefore, we don't need to shift at all (shift = 0).
  static constexpr int precomputed_narrow_klass_shift = 0;
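  //
  // Encoding sketch (illustrative only; "mapping_start" is a placeholder name, and the real
  // computation lives in the archive-writing and CompressedKlassPointers code):
  //
  //   narrowKlass nk = (narrowKlass)(((address)klass - mapping_start) >> precomputed_narrow_klass_shift);
  //   // With shift == 0 this is simply the Klass's byte offset from the start of the
  //   // mapped (archive + class space) range.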

};
#endif // INCLUDE_CDS_JAVA_HEAP
#endif // SHARE_CDS_ARCHIVEHEAPWRITER_HPP