/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_CDS_ARCHIVEHEAPWRITER_HPP
#define SHARE_CDS_ARCHIVEHEAPWRITER_HPP

#include "cds/heapShared.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopHandle.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/resourceHash.hpp"

class MemRegion;

class ArchiveHeapInfo {
  MemRegion _buffer_region;             // Contains the archived objects to be written into the CDS archive.
  CHeapBitMap _oopmap;
  CHeapBitMap _ptrmap;
  size_t _heap_roots_offset;            // Offset of the HeapShared::roots() object, from the bottom
                                        // of the archived heap objects, in bytes.

public:
  ArchiveHeapInfo() : _buffer_region(), _oopmap(128, mtClassShared), _ptrmap(128, mtClassShared) {}
  bool is_used() { return !_buffer_region.is_empty(); }

  MemRegion buffer_region() { return _buffer_region; }
  void set_buffer_region(MemRegion r) { _buffer_region = r; }

  char* buffer_start() { return (char*)_buffer_region.start(); }
  size_t buffer_byte_size() { return _buffer_region.byte_size(); }

  CHeapBitMap* oopmap() { return &_oopmap; }
  CHeapBitMap* ptrmap() { return &_ptrmap; }

  void set_heap_roots_offset(size_t n) { _heap_roots_offset = n; }
  size_t heap_roots_offset() const { return _heap_roots_offset; }
};

#if INCLUDE_CDS_JAVA_HEAP
class ArchiveHeapWriter : AllStatic {
  friend class HeapShared;
  // ArchiveHeapWriter manipulates three types of addresses:
  //
  //     "source" vs "buffered" vs "requested"
  //
  // (Note: the design and convention is the same as for the archiving of Metaspace objects.
  //  See archiveBuilder.hpp.)
  //
  // - "source objects" are regular Java objects allocated during the execution
  //   of "java -Xshare:dump". They can be used as regular oops.
  //
  //   HeapShared::archive_objects() recursively searches for the oops that need to be
  //   stored into the CDS archive. These are entered into HeapShared::archived_object_cache().
  //
  // - "buffered objects" are copies of the "source objects", and are stored into
  //   ArchiveHeapWriter::_buffer, which is a GrowableArray that sits outside of
  //   the valid heap range. Therefore we avoid using the addresses of these copies
  //   as oops. They are usually called "buffered_addr" in the code (of the type "address").
  //
  //   The buffered objects are stored contiguously, possibly with interleaving fillers
  //   to make sure that no object spans across a MIN_GC_REGION_ALIGNMENT boundary.
  //
  // - Each archived object has a "requested address" -- at run time, if the object
  //   can be mapped at this address, we can avoid relocation.
  //
  // The requested address is implemented differently depending on UseCompressedOops:
  //
  // UseCompressedOops == true:
  //   The archived objects are stored assuming that the runtime COOPS compression
  //   scheme is exactly the same as at dump time (or else a more expensive runtime
  //   relocation would be needed).
  //
  //   At dump time, we assume that the runtime heap range will be exactly the same as
  //   the dump-time heap range. The requested addresses of the archived objects are chosen
  //   such that they would occupy the top end of a G1 heap (TBD when dumping is supported
  //   by other collectors. See JDK-8298614).
  //
  // UseCompressedOops == false:
  //   At runtime, the heap range is usually picked (randomly) by the OS, so we will almost always
  //   need to perform relocation. Hence, the goal of the "requested address" is to ensure that
  //   the contents of the archived objects are deterministic. I.e., the oop fields of archived
  //   objects will always point to deterministic addresses.
  //
  //   For G1, the archived heap is written such that the lowest archived object is placed
  //   at NOCOOPS_REQUESTED_BASE. (TBD after JDK-8298614)
  // ----------------------------------------------------------------------
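  //
  // A minimal sketch (comments only, not actual code in this class) of how a buffered
  // address relates to its requested counterpart, assuming the requested range mirrors
  // the buffer layout one-to-one:
  //
  //   size_t offset          = buffered_addr - buffer_bottom();   // see buffered_address_to_offset()
  //   address requested_addr = _requested_bottom + offset;        // see buffered_addr_to_requested_addr()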

public:
  static const intptr_t NOCOOPS_REQUESTED_BASE = 0x10000000;

private:
  class EmbeddedOopRelocator;
  struct NativePointerInfo {
    oop _src_obj;
    int _field_offset;
  };

  // The minimum region size of all collectors that are supported by CDS in
  // ArchiveHeapLoader::can_map() mode. Currently only G1 is supported. G1's region size
  // depends on -Xmx, but can never be smaller than 1 * M.
  // (TODO: Perhaps change to 256K to be compatible with Shenandoah)
  static constexpr int MIN_GC_REGION_ALIGNMENT = 1 * M;
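
  // A hedged sketch (illustrative only, not the actual implementation) of how
  // maybe_fill_gc_region_gap() is expected to use this constant: if copying the next
  // object would make it straddle a MIN_GC_REGION_ALIGNMENT boundary, pad with a
  // filler array first so that the object starts in the next region:
  //
  //   size_t used_in_region = _buffer_used % MIN_GC_REGION_ALIGNMENT;
  //   if (used_in_region + required_byte_size > MIN_GC_REGION_ALIGNMENT) {
  //     // insert a filler array up to the next boundary; see
  //     // filler_array_length() and init_filler_array_at_buffer_top().
  //   }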

  static GrowableArrayCHeap<u1, mtClassShared>* _buffer;

  // The number of bytes that have been written into _buffer (may be smaller than _buffer->length()).
  static size_t _buffer_used;

  // The bottom of the copy of HeapShared::roots() inside this->_buffer.
  static size_t _heap_roots_offset;
  static size_t _heap_roots_word_size;

  // The address range of the requested location of the archived heap objects.
  static address _requested_bottom;
  static address _requested_top;

  static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
  static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;
  static GrowableArrayCHeap<oop, mtClassShared>* _perm_objs;

  typedef ResourceHashtable<size_t, oop,
      36137, // prime number
      AnyObj::C_HEAP,
      mtClassShared> BufferOffsetToSourceObjectTable;
  static BufferOffsetToSourceObjectTable* _buffer_offset_to_source_obj_table;

  static void allocate_buffer();
  static void ensure_buffer_space(size_t min_bytes);

  // Both Java byte arrays and GrowableArrays use int indices and lengths. Do a safe typecast with a range check.
  static int to_array_index(size_t i) {
    assert(i <= (size_t)max_jint, "must be");
    return (int)i;
  }
  static int to_array_length(size_t n) {
    return to_array_index(n);
  }

  template <typename T> static T offset_to_buffered_address(size_t offset) {
    return (T)(_buffer->adr_at(to_array_index(offset)));
  }

  static address buffer_bottom() {
    return offset_to_buffered_address<address>(0);
  }

  // The exclusive end of the last object that was copied into the buffer.
  static address buffer_top() {
    return buffer_bottom() + _buffer_used;
  }

  static bool in_buffer(address buffered_addr) {
    return (buffer_bottom() <= buffered_addr) && (buffered_addr < buffer_top());
  }

  static size_t buffered_address_to_offset(address buffered_addr) {
    assert(in_buffer(buffered_addr), "sanity");
    return buffered_addr - buffer_bottom();
  }

  static size_t create_objarray_in_buffer(GrowableArrayCHeap<oop, mtClassShared>* input, int from,
                                          int num_elms, int extra_length, size_t& objarray_word_size);
  static int copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots, GrowableArray<size_t>* permobj_seg_offsets);
  template <typename T> static void add_permobj_segments_to_roots(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                                                  ArchiveHeapInfo* info, GrowableArray<size_t>* permobj_seg_offsets);
  static size_t copy_one_source_obj_to_buffer(oop src_obj);

  static void maybe_fill_gc_region_gap(size_t required_byte_size);
  static size_t filler_array_byte_size(int length);
  static int filler_array_length(size_t fill_bytes);
  static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);

  static void set_requested_address(ArchiveHeapInfo* info);
  static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots, ArchiveHeapInfo* info,
                                     GrowableArray<size_t>* permobj_seg_offsets, int num_permobj);
  static void compute_ptrmap(ArchiveHeapInfo* info);
  static bool is_in_requested_range(oop o);
  static oop requested_obj_from_buffer_offset(size_t offset);

  static oop load_oop_from_buffer(oop* buffered_addr);
  static oop load_oop_from_buffer(narrowOop* buffered_addr);
  inline static void store_oop_in_buffer(oop* buffered_addr, oop requested_obj);
  inline static void store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj);

  template <typename T> static oop load_source_oop_from_buffer(T* buffered_addr);
  template <typename T> static void store_requested_oop_in_buffer(T* buffered_addr, oop request_oop);

  template <typename T> static T* requested_addr_to_buffered_addr(T* p);
  template <typename T> static void relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap);
  template <typename T> static void mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap);
  template <typename T> static void relocate_root_at(oop requested_roots, address buffered_roots_addr, int index, CHeapBitMap* oopmap);

  static void update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass);

220   // "Permanent Objects"
221   //
222   // These objects are guaranteed to be in the heap at runtime. The AOT can use
223   // HeapShared::get_archived_object_permanent_index() and HeapShared::get_archived_object() to
224   // inline these objects into the AOT cache.
225   //
226   // Currently all archived objects are "permanent". We may want to narrow the scope ....
227   //
228   // The permobjs are divided into multiple segments, each containing 64K elements (or 4096 in debug builds).
229   // This is to avoid overflowing MIN_GC_REGION_ALIGNMENT.
230   static constexpr int PERMOBJ_SEGMENT_MAX_SHIFT  = DEBUG_ONLY(12) NOT_DEBUG(16);
231   static constexpr int PERMOBJ_SEGMENT_MAX_LENGTH = 1 << PERMOBJ_SEGMENT_MAX_SHIFT;
232   static constexpr int PERMOBJ_SEGMENT_MAX_MASK   = PERMOBJ_SEGMENT_MAX_LENGTH - 1;
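
  // A hedged sketch (an assumption based on the constants above, not necessarily the
  // exact implementation) of how a permanent index would map to a segment and a
  // position within that segment:
  //
  //   int segment          = permanent_index >> PERMOBJ_SEGMENT_MAX_SHIFT;
  //   int index_in_segment = permanent_index &  PERMOBJ_SEGMENT_MAX_MASK;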
public:
  static void init() NOT_CDS_JAVA_HEAP_RETURN;
  static void add_source_obj(oop src_obj);
  static bool is_too_large_to_archive(size_t size);
  static bool is_too_large_to_archive(oop obj);
  static bool is_string_too_large_to_archive(oop string);
  static void write(GrowableArrayCHeap<oop, mtClassShared>*, ArchiveHeapInfo* heap_info);
  static address requested_address();  // requested address of the lowest archived heap object
  static oop heap_roots_requested_address(); // requested address of HeapShared::roots()
  static address buffered_heap_roots_addr() {
    return offset_to_buffered_address<address>(_heap_roots_offset);
  }
  static size_t heap_roots_word_size() {
    return _heap_roots_word_size;
  }
  static size_t get_filler_size_at(address buffered_addr);
  static int get_permobj_segment_at(address buffered_addr, size_t* byte_size, int* permobj_segment_length);
  static oop get_permobj_source_addr(int permobj_segment, int index);
  static oop get_perm_object_by_index(int permanent_index);

  static void mark_native_pointer(oop src_obj, int offset);
  static bool is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset);
  static oop source_obj_to_requested_obj(oop src_obj);
  static oop buffered_addr_to_source_obj(address buffered_addr);
  static address buffered_addr_to_requested_addr(address buffered_addr);

  // Archived heap object headers carry pre-computed narrow Klass ids calculated with the
  // following scheme:
  // 1) the encoding base must be the mapping start address.
  // 2) shift must be large enough to result in an encoding range that covers the runtime Klass range.
  //    That Klass range is defined by CDS archive size and runtime class space size. Luckily, the maximum
  //    size can be predicted: archive size is assumed to be <1G, class space size capped at 3G, and at
  //    runtime we put both regions adjacent to each other. Therefore, runtime Klass range size < 4G.
  //    Since nKlass itself is 32 bit, our encoding range length is 4G, and since we set the base directly
  //    at mapping start, these 4G are enough. Therefore, we don't need to shift at all (shift=0).
  static constexpr int precomputed_narrow_klass_shift = 0;
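
  // A minimal sketch (illustrative assumption, not the actual encoder) of the scheme
  // described above, with base = mapping start and shift = 0:
  //
  //   narrowKlass nk = (narrowKlass)(((address)k - mapping_start) >> precomputed_narrow_klass_shift);
  //
  // where "mapping_start" stands for the requested start address of the mapped archive regions.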

};
#endif // INCLUDE_CDS_JAVA_HEAP
#endif // SHARE_CDS_ARCHIVEHEAPWRITER_HPP