< prev index next >

src/hotspot/share/cds/archiveHeapWriter.hpp

Print this page

 46 
 47 public:
 48   ArchiveHeapInfo() : _buffer_region(), _oopmap(128, mtClassShared), _ptrmap(128, mtClassShared) {}
 49   bool is_used() { return !_buffer_region.is_empty(); }
 50 
 51   MemRegion buffer_region() { return _buffer_region; }
 52   void set_buffer_region(MemRegion r) { _buffer_region = r; }
 53 
 54   char* buffer_start() { return (char*)_buffer_region.start(); }
 55   size_t buffer_byte_size() { return _buffer_region.byte_size();    }
 56 
 57   CHeapBitMap* oopmap() { return &_oopmap; }
 58   CHeapBitMap* ptrmap() { return &_ptrmap; }
 59 
 60   void set_heap_roots_offset(size_t n) { _heap_roots_offset = n; }
 61   size_t heap_roots_offset() const { return _heap_roots_offset; }
 62 };
 63 
 64 #if INCLUDE_CDS_JAVA_HEAP
 65 class ArchiveHeapWriter : AllStatic {

 66   // ArchiveHeapWriter manipulates three types of addresses:
 67   //
 68   //     "source" vs "buffered" vs "requested"
 69   //
 70   // (Note: the design and convention is the same as for the archiving of Metaspace objects.
 71   //  See archiveBuilder.hpp.)
 72   //
 73   // - "source objects" are regular Java objects allocated during the execution
 74   //   of "java -Xshare:dump". They can be used as regular oops.
 75   //
 76   //   HeapShared::archive_objects() recursively searches for the oops that need to be
 77   //   stored into the CDS archive. These are entered into HeapShared::archived_object_cache().
 78   //
 79   // - "buffered objects" are copies of the "source objects", and are stored into
 80   //   ArchiveHeapWriter::_buffer, which is a GrowableArray that sits outside of
 81   //   the valid heap range. Therefore we avoid using the addresses of these copies
 82   //   as oops. They are usually called "buffered_addr" in the code (of the type "address").
 83   //
 84   //   The buffered objects are stored contiguously, possibly with interleaving fillers
 85   //   to make sure no objects span across boundaries of MIN_GC_REGION_ALIGNMENT.

123   // ArchiveHeapLoader::can_map() mode. Currently only G1 is supported. G1's region size
124   // depends on -Xmx, but can never be smaller than 1 * M.
125   // (TODO: Perhaps change to 256K to be compatible with Shenandoah)
126   static constexpr int MIN_GC_REGION_ALIGNMENT = 1 * M;
127 
128   static GrowableArrayCHeap<u1, mtClassShared>* _buffer;
129 
130   // The number of bytes that have been written into _buffer (may be smaller than _buffer->length()).
131   static size_t _buffer_used;
132 
133   // The bottom of the copy of Heap::roots() inside this->_buffer.
134   static size_t _heap_roots_offset;
135   static size_t _heap_roots_word_size;
136 
137   // The address range of the requested location of the archived heap objects.
138   static address _requested_bottom;
139   static address _requested_top;
140 
141   static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
142   static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;

143 
144   typedef ResourceHashtable<size_t, oop,
145       36137, // prime number
146       AnyObj::C_HEAP,
147       mtClassShared> BufferOffsetToSourceObjectTable;
148   static BufferOffsetToSourceObjectTable* _buffer_offset_to_source_obj_table;
149 
150   static void allocate_buffer();
151   static void ensure_buffer_space(size_t min_bytes);
152 
153   // Both Java bytearray and GrowableArray use int indices and lengths. Do a safe typecast with range check
154   static int to_array_index(size_t i) {
155     assert(i <= (size_t)max_jint, "must be");
156     return (int)i;
157   }
158   static int to_array_length(size_t n) {
159     return to_array_index(n);
160   }
161 
162   template <typename T> static T offset_to_buffered_address(size_t offset) {

164   }
165 
166   static address buffer_bottom() {
167     return offset_to_buffered_address<address>(0);
168   }
169 
170   // The exclusive end of the last object that was copied into the buffer.
171   static address buffer_top() {
172     return buffer_bottom() + _buffer_used;
173   }
174 
175   static bool in_buffer(address buffered_addr) {
176     return (buffer_bottom() <= buffered_addr) && (buffered_addr < buffer_top());
177   }
178 
179   static size_t buffered_address_to_offset(address buffered_addr) {
180     assert(in_buffer(buffered_addr), "sanity");
181     return buffered_addr - buffer_bottom();
182   }
183 
184   static void copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
185   static void copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);



186   static size_t copy_one_source_obj_to_buffer(oop src_obj);
187 
188   static void maybe_fill_gc_region_gap(size_t required_byte_size);
189   static size_t filler_array_byte_size(int length);
190   static int filler_array_length(size_t fill_bytes);
191   static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);
192 
193   static void set_requested_address(ArchiveHeapInfo* info);
194   static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots, ArchiveHeapInfo* info);

195   static void compute_ptrmap(ArchiveHeapInfo *info);
196   static bool is_in_requested_range(oop o);
197   static oop requested_obj_from_buffer_offset(size_t offset);
198 
199   static oop load_oop_from_buffer(oop* buffered_addr);
200   static oop load_oop_from_buffer(narrowOop* buffered_addr);
201   inline static void store_oop_in_buffer(oop* buffered_addr, oop requested_obj);
202   inline static void store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj);
203 
204   template <typename T> static oop load_source_oop_from_buffer(T* buffered_addr);
205   template <typename T> static void store_requested_oop_in_buffer(T* buffered_addr, oop request_oop);
206 
207   template <typename T> static T* requested_addr_to_buffered_addr(T* p);
208   template <typename T> static void relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap);
209   template <typename T> static void mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap);
210   template <typename T> static void relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap);
211 
212   static void update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass);














213 public:
214   static void init() NOT_CDS_JAVA_HEAP_RETURN;
215   static void add_source_obj(oop src_obj);
216   static bool is_too_large_to_archive(size_t size);
217   static bool is_too_large_to_archive(oop obj);
218   static bool is_string_too_large_to_archive(oop string);
219   static void write(GrowableArrayCHeap<oop, mtClassShared>*, ArchiveHeapInfo* heap_info);
220   static address requested_address();  // requested address of the lowest archived heap object
221   static oop heap_roots_requested_address(); // requested address of HeapShared::roots()
222   static address buffered_heap_roots_addr() {
223     return offset_to_buffered_address<address>(_heap_roots_offset);
224   }
225   static size_t heap_roots_word_size() {
226     return _heap_roots_word_size;
227   }
228   static size_t get_filler_size_at(address buffered_addr);



229 
230   static void mark_native_pointer(oop src_obj, int offset);
231   static bool is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset);
232   static oop source_obj_to_requested_obj(oop src_obj);
233   static oop buffered_addr_to_source_obj(address buffered_addr);
234   static address buffered_addr_to_requested_addr(address buffered_addr);
235 
236   // Archived heap object headers carry pre-computed narrow Klass ids calculated with the
237   // following scheme:
238   // 1) the encoding base must be the mapping start address.
239   // 2) shift must be large enough to result in an encoding range that covers the runtime Klass range.
240   //    That Klass range is defined by CDS archive size and runtime class space size. Luckily, the maximum
241   //    size can be predicted: archive size is assumed to be <1G, class space size capped at 3G, and at
242   //    runtime we put both regions adjacent to each other. Therefore, runtime Klass range size < 4G.
243   //    Since nKlass itself is 32 bit, our encoding range len is 4G, and since we set the base directly
244   //    at mapping start, these 4G are enough. Therefore, we don't need to shift at all (shift=0).
245   static constexpr int precomputed_narrow_klass_shift = 0;
246 
247 };
248 #endif // INCLUDE_CDS_JAVA_HEAP

 46 
 47 public:
 48   ArchiveHeapInfo() : _buffer_region(), _oopmap(128, mtClassShared), _ptrmap(128, mtClassShared) {}
 49   bool is_used() { return !_buffer_region.is_empty(); }
 50 
 51   MemRegion buffer_region() { return _buffer_region; }
 52   void set_buffer_region(MemRegion r) { _buffer_region = r; }
 53 
 54   char* buffer_start() { return (char*)_buffer_region.start(); }
 55   size_t buffer_byte_size() { return _buffer_region.byte_size();    }
 56 
 57   CHeapBitMap* oopmap() { return &_oopmap; }
 58   CHeapBitMap* ptrmap() { return &_ptrmap; }
 59 
 60   void set_heap_roots_offset(size_t n) { _heap_roots_offset = n; }
 61   size_t heap_roots_offset() const { return _heap_roots_offset; }
 62 };
 63 
 64 #if INCLUDE_CDS_JAVA_HEAP
 65 class ArchiveHeapWriter : AllStatic {
 66   friend class HeapShared;
 67   // ArchiveHeapWriter manipulates three types of addresses:
 68   //
 69   //     "source" vs "buffered" vs "requested"
 70   //
 71   // (Note: the design and convention is the same as for the archiving of Metaspace objects.
 72   //  See archiveBuilder.hpp.)
 73   //
 74   // - "source objects" are regular Java objects allocated during the execution
 75   //   of "java -Xshare:dump". They can be used as regular oops.
 76   //
 77   //   HeapShared::archive_objects() recursively searches for the oops that need to be
 78   //   stored into the CDS archive. These are entered into HeapShared::archived_object_cache().
 79   //
 80   // - "buffered objects" are copies of the "source objects", and are stored into
 81   //   ArchiveHeapWriter::_buffer, which is a GrowableArray that sits outside of
 82   //   the valid heap range. Therefore we avoid using the addresses of these copies
 83   //   as oops. They are usually called "buffered_addr" in the code (of the type "address").
 84   //
 85   //   The buffered objects are stored contiguously, possibly with interleaving fillers
 86   //   to make sure no objects span across boundaries of MIN_GC_REGION_ALIGNMENT.

124   // ArchiveHeapLoader::can_map() mode. Currently only G1 is supported. G1's region size
125   // depends on -Xmx, but can never be smaller than 1 * M.
126   // (TODO: Perhaps change to 256K to be compatible with Shenandoah)
127   static constexpr int MIN_GC_REGION_ALIGNMENT = 1 * M;
128 
129   static GrowableArrayCHeap<u1, mtClassShared>* _buffer;
130 
131   // The number of bytes that have been written into _buffer (may be smaller than _buffer->length()).
132   static size_t _buffer_used;
133 
134   // The bottom of the copy of Heap::roots() inside this->_buffer.
135   static size_t _heap_roots_offset;
136   static size_t _heap_roots_word_size;
137 
138   // The address range of the requested location of the archived heap objects.
139   static address _requested_bottom;
140   static address _requested_top;
141 
142   static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
143   static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;
144   static GrowableArrayCHeap<oop, mtClassShared>* _perm_objs;
145 
146   typedef ResourceHashtable<size_t, oop,
147       36137, // prime number
148       AnyObj::C_HEAP,
149       mtClassShared> BufferOffsetToSourceObjectTable;
150   static BufferOffsetToSourceObjectTable* _buffer_offset_to_source_obj_table;
151 
152   static void allocate_buffer();
153   static void ensure_buffer_space(size_t min_bytes);
154 
155   // Both Java bytearray and GrowableArray use int indices and lengths. Do a safe typecast with range check
156   static int to_array_index(size_t i) {
157     assert(i <= (size_t)max_jint, "must be");
158     return (int)i;
159   }
160   static int to_array_length(size_t n) {
161     return to_array_index(n);
162   }
163 
164   template <typename T> static T offset_to_buffered_address(size_t offset) {

166   }
167 
168   static address buffer_bottom() {
169     return offset_to_buffered_address<address>(0);
170   }
171 
172   // The exclusive end of the last object that was copied into the buffer.
173   static address buffer_top() {
174     return buffer_bottom() + _buffer_used;
175   }
176 
177   static bool in_buffer(address buffered_addr) {
178     return (buffer_bottom() <= buffered_addr) && (buffered_addr < buffer_top());
179   }
180 
181   static size_t buffered_address_to_offset(address buffered_addr) {
182     assert(in_buffer(buffered_addr), "sanity");
183     return buffered_addr - buffer_bottom();
184   }
185 
186   static size_t create_objarray_in_buffer(GrowableArrayCHeap<oop, mtClassShared>* input, int from,
187                                           int num_elms, int extra_length, size_t& objarray_word_size);
188   static int copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots, GrowableArray<size_t>* permobj_seg_offsets);
189   template <typename T> static void add_permobj_segments_to_roots(GrowableArrayCHeap<oop, mtClassShared>* roots,
190                                                                   ArchiveHeapInfo* info, GrowableArray<size_t>* permobj_seg_offsets);
191   static size_t copy_one_source_obj_to_buffer(oop src_obj);
192 
193   static void maybe_fill_gc_region_gap(size_t required_byte_size);
194   static size_t filler_array_byte_size(int length);
195   static int filler_array_length(size_t fill_bytes);
196   static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);
197 
198   static void set_requested_address(ArchiveHeapInfo* info);
199   static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots, ArchiveHeapInfo* info,
200                                      GrowableArray<size_t>* permobj_seg_offsets, int num_permobj);
201   static void compute_ptrmap(ArchiveHeapInfo *info);
202   static bool is_in_requested_range(oop o);
203   static oop requested_obj_from_buffer_offset(size_t offset);
204 
205   static oop load_oop_from_buffer(oop* buffered_addr);
206   static oop load_oop_from_buffer(narrowOop* buffered_addr);
207   inline static void store_oop_in_buffer(oop* buffered_addr, oop requested_obj);
208   inline static void store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj);
209 
210   template <typename T> static oop load_source_oop_from_buffer(T* buffered_addr);
211   template <typename T> static void store_requested_oop_in_buffer(T* buffered_addr, oop request_oop);
212 
213   template <typename T> static T* requested_addr_to_buffered_addr(T* p);
214   template <typename T> static void relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap);
215   template <typename T> static void mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap);
216   template <typename T> static void relocate_root_at(oop requested_roots, address buffered_roots_addr, int index, CHeapBitMap* oopmap);
217 
218   static void update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass);
219 
220   // "Permanent Objects"
221   //
222   // These objects are guaranteed to be in the heap at runtime. The AOT can use
223   // HeapShared::get_archived_object_permanent_index() and HeapShared::get_archived_object() to
224   // inline these objects into the AOT cache.
225   //
226   // Currently all archived objects are "permanent". We may want to narrow the scope ....
227   //
228   // The permobjs are divided into multiple segments, each containing 64K elements (or 4096 in debug builds).
229   // This is to avoid overflowing MIN_GC_REGION_ALIGNMENT.
230   static constexpr int PERMOBJ_SEGMENT_MAX_SHIFT  = DEBUG_ONLY(12) NOT_DEBUG(16);
231   static constexpr int PERMOBJ_SEGMENT_MAX_LENGTH = 1 << PERMOBJ_SEGMENT_MAX_SHIFT;
232   static constexpr int PERMOBJ_SEGMENT_MAX_MASK   = PERMOBJ_SEGMENT_MAX_LENGTH - 1;
233 public:
234   static void init() NOT_CDS_JAVA_HEAP_RETURN;
235   static void add_source_obj(oop src_obj);
236   static bool is_too_large_to_archive(size_t size);
237   static bool is_too_large_to_archive(oop obj);
238   static bool is_string_too_large_to_archive(oop string);
239   static void write(GrowableArrayCHeap<oop, mtClassShared>*, ArchiveHeapInfo* heap_info);
240   static address requested_address();  // requested address of the lowest archived heap object
241   static oop heap_roots_requested_address(); // requested address of HeapShared::roots()
242   static address buffered_heap_roots_addr() {
243     return offset_to_buffered_address<address>(_heap_roots_offset);
244   }
245   static size_t heap_roots_word_size() {
246     return _heap_roots_word_size;
247   }
248   static size_t get_filler_size_at(address buffered_addr);
249   static int get_permobj_segment_at(address buffered_addr, size_t* byte_size, int* permobj_segment_length);
250   static oop get_permobj_source_addr(int permobj_segment, int index);
251   static oop get_perm_object_by_index(int permanent_index);
252 
253   static void mark_native_pointer(oop src_obj, int offset);
254   static bool is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset);
255   static oop source_obj_to_requested_obj(oop src_obj);
256   static oop buffered_addr_to_source_obj(address buffered_addr);
257   static address buffered_addr_to_requested_addr(address buffered_addr);
258 
259   // Archived heap object headers carry pre-computed narrow Klass ids calculated with the
260   // following scheme:
261   // 1) the encoding base must be the mapping start address.
262   // 2) shift must be large enough to result in an encoding range that covers the runtime Klass range.
263   //    That Klass range is defined by CDS archive size and runtime class space size. Luckily, the maximum
264   //    size can be predicted: archive size is assumed to be <1G, class space size capped at 3G, and at
265   //    runtime we put both regions adjacent to each other. Therefore, runtime Klass range size < 4G.
266   //    Since nKlass itself is 32 bit, our encoding range len is 4G, and since we set the base directly
267   //    at mapping start, these 4G are enough. Therefore, we don't need to shift at all (shift=0).
268   static constexpr int precomputed_narrow_klass_shift = 0;
269 
270 };
271 #endif // INCLUDE_CDS_JAVA_HEAP
< prev index next >