1 /*
2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
46
public:
  // Starts with an empty (unused) buffer region; the oop/ptr bitmaps are
  // created with an initial capacity of 128 bits, accounted under mtClassShared.
  ArchiveHeapInfo() : _buffer_region(), _oopmap(128, mtClassShared), _ptrmap(128, mtClassShared) {}
  // True iff a non-empty buffer region has been installed via set_buffer_region().
  bool is_used() { return !_buffer_region.is_empty(); }

  // The memory range that holds the written archived-heap image.
  MemRegion buffer_region() { return _buffer_region; }
  void set_buffer_region(MemRegion r) { _buffer_region = r; }

  // Raw start address and byte size of the buffer region.
  char* buffer_start() { return (char*)_buffer_region.start(); }
  size_t buffer_byte_size() { return _buffer_region.byte_size(); }

  // Bitmaps recording the positions of embedded oops and of native pointers
  // inside the buffer (filled in by ArchiveHeapWriter).
  CHeapBitMap* oopmap() { return &_oopmap; }
  CHeapBitMap* ptrmap() { return &_ptrmap; }

  // Offset of the copy of the heap roots within the buffer.
  void set_heap_roots_offset(size_t n) { _heap_roots_offset = n; }
  size_t heap_roots_offset() const { return _heap_roots_offset; }
};
63
64 #if INCLUDE_CDS_JAVA_HEAP
class ArchiveHeapWriter : AllStatic {
  // ArchiveHeapWriter manipulates three types of addresses:
  //
  //     "source" vs "buffered" vs "requested"
  //
  // (Note: the design and convention is the same as for the archiving of Metaspace objects.
  // See archiveBuilder.hpp.)
  //
  // - "source objects" are regular Java objects allocated during the execution
  //   of "java -Xshare:dump". They can be used as regular oops.
  //
  //   HeapShared::archive_objects() recursively searches for the oops that need to be
  //   stored into the CDS archive. These are entered into HeapShared::archived_object_cache().
  //
  // - "buffered objects" are copies of the "source objects", and are stored into
  //   ArchiveHeapWriter::_buffer, which is a GrowableArray that sits outside of
  //   the valid heap range. Therefore we avoid using the addresses of these copies
  //   as oops. They are usually called "buffered_addr" in the code (of the type "address").
  //
  //   The buffered objects are stored contiguously, possibly with interleaving fillers
  //   to make sure no objects span across boundaries of MIN_GC_REGION_ALIGNMENT.
  //
  // NOTE(review): source lines are missing from this chunk here; the next
  // comment continues mid-sentence -- confirm the full text against the
  // complete file.
  //
  // ArchiveHeapLoader::can_map() mode. Currently only G1 is supported. G1's region size
  // depends on -Xmx, but can never be smaller than 1 * M.
  // (TODO: Perhaps change to 256K to be compatible with Shenandoah)
  static constexpr int MIN_GC_REGION_ALIGNMENT = 1 * M;

  // Backing store for the buffered objects.
  static GrowableArrayCHeap<u1, mtClassShared>* _buffer;

  // The number of bytes that have been written into _buffer (may be smaller than _buffer->length()).
  static size_t _buffer_used;

  // The bottom of the copy of HeapShared::roots() inside this->_buffer.
  static size_t _heap_roots_offset;
  static size_t _heap_roots_word_size;

  // The address range of the requested location of the archived heap objects.
  static address _requested_bottom;
  static address _requested_top;

  // Locations of native pointers embedded in source objects (registered via mark_native_pointer()).
  static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
  // All source objects registered via add_source_obj(), in registration order.
  static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;

  // We sort _source_objs_order to minimize the number of bits in ptrmap and oopmap.
  // See comments near the body of ArchiveHeapWriter::compare_objs_by_oop_fields().
  // The objects will be written in the order of:
  //     _source_objs->at(_source_objs_order->at(0)._index)
  //     _source_objs->at(_source_objs_order->at(1)._index)
  //     _source_objs->at(_source_objs_order->at(2)._index)
  //     ...
  struct HeapObjOrder {
    int _index;   // The location of this object in _source_objs
    int _rank;    // A lower rank means the object will be written at a lower location.
  };
  static GrowableArrayCHeap<HeapObjOrder, mtClassShared>* _source_objs_order;

  // Maps the offset of a buffered object (relative to buffer_bottom()) back to
  // its source oop; see buffered_addr_to_source_obj().
  typedef ResizeableResourceHashtable<size_t, oop,
      AnyObj::C_HEAP,
      mtClassShared> BufferOffsetToSourceObjectTable;
  static BufferOffsetToSourceObjectTable* _buffer_offset_to_source_obj_table;

  static void allocate_buffer();
  // NOTE(review): source lines are missing from this chunk here; the stray
  // closing brace below ends a definition whose beginning is not visible
  // (presumably the offset_to_buffered_address() template used by the
  // functions below) -- confirm against the complete file.
  }

  // The buffered address corresponding to buffer offset 0.
  static address buffer_bottom() {
    return offset_to_buffered_address<address>(0);
  }

  // The exclusive end of the last object that was copied into the buffer.
  static address buffer_top() {
    return buffer_bottom() + _buffer_used;
  }

  // Does buffered_addr point into the used portion of the buffer?
  static bool in_buffer(address buffered_addr) {
    return (buffer_bottom() <= buffered_addr) && (buffered_addr < buffer_top());
  }

  // Inverse of offset_to_buffered_address(): buffered address -> buffer offset.
  static size_t buffered_address_to_offset(address buffered_addr) {
    assert(in_buffer(buffered_addr), "sanity");
    return buffered_addr - buffer_bottom();
  }

  // Copying of the roots and the source objects into _buffer.
  static void copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
  static void copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
  static size_t copy_one_source_obj_to_buffer(oop src_obj);

  // Filler management, so that no object crosses a MIN_GC_REGION_ALIGNMENT boundary.
  static void maybe_fill_gc_region_gap(size_t required_byte_size);
  static size_t filler_array_byte_size(int length);
  static int filler_array_length(size_t fill_bytes);
  static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);

  static void set_requested_address(ArchiveHeapInfo* info);
  static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots, ArchiveHeapInfo* info);
  static void compute_ptrmap(ArchiveHeapInfo *info);
  static bool is_in_requested_range(oop o);
  static oop requested_obj_from_buffer_offset(size_t offset);

  // Raw loads/stores of oop fields located inside the buffer.
  static oop load_oop_from_buffer(oop* buffered_addr);
  static oop load_oop_from_buffer(narrowOop* buffered_addr);
  inline static void store_oop_in_buffer(oop* buffered_addr, oop requested_obj);
  inline static void store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj);

  template <typename T> static oop load_source_oop_from_buffer(T* buffered_addr);
  template <typename T> static void store_requested_oop_in_buffer(T* buffered_addr, oop request_oop);

  template <typename T> static T* requested_addr_to_buffered_addr(T* p);
  template <typename T> static void relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap);
  template <typename T> static void mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap);
  template <typename T> static void relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap);

  static void update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass);

  // Ordering used when writing the source objects (see _source_objs_order above).
  static int compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b);
  static void sort_source_objs();

public:
  static void init() NOT_CDS_JAVA_HEAP_RETURN;
  static void add_source_obj(oop src_obj);
  static bool is_too_large_to_archive(size_t size);
  static bool is_too_large_to_archive(oop obj);
  static bool is_string_too_large_to_archive(oop string);
  static void write(GrowableArrayCHeap<oop, mtClassShared>*, ArchiveHeapInfo* heap_info);
  static address requested_address();        // requested address of the lowest archived heap object
  static oop heap_roots_requested_address(); // requested address of HeapShared::roots()
  static address buffered_heap_roots_addr() {
    return offset_to_buffered_address<address>(_heap_roots_offset);
  }
  static size_t heap_roots_word_size() {
    return _heap_roots_word_size;
  }
  static size_t get_filler_size_at(address buffered_addr);

  static void mark_native_pointer(oop src_obj, int offset);
  static bool is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset);
  static oop source_obj_to_requested_obj(oop src_obj);
  static oop buffered_addr_to_source_obj(address buffered_addr);
  static address buffered_addr_to_requested_addr(address buffered_addr);

  // Archived heap object headers carry pre-computed narrow Klass ids calculated with the
  // following scheme:
  // 1) the encoding base must be the mapping start address.
  // 2) shift must be large enough to result in an encoding range that covers the runtime Klass range.
  //    That Klass range is defined by CDS archive size and runtime class space size. Luckily, the maximum
  //    size can be predicted: archive size is assumed to be <1G, class space size capped at 3G, and at
  //    runtime we put both regions adjacent to each other. Therefore, runtime Klass range size < 4G.
  //    Since nKlass itself is 32 bit, our encoding range len is 4G, and since we set the base directly
  //    at mapping start, these 4G are enough. Therefore, we don't need to shift at all (shift=0).
  static constexpr int precomputed_narrow_klass_shift = 0;

};
264 #endif // INCLUDE_CDS_JAVA_HEAP
|
1 /*
2 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
46
public:
  // Starts with an empty (unused) buffer region; the oop/ptr bitmaps are
  // created with an initial capacity of 128 bits, accounted under mtClassShared.
  ArchiveHeapInfo() : _buffer_region(), _oopmap(128, mtClassShared), _ptrmap(128, mtClassShared) {}
  // True iff a non-empty buffer region has been installed via set_buffer_region().
  bool is_used() { return !_buffer_region.is_empty(); }

  // The memory range that holds the written archived-heap image.
  MemRegion buffer_region() { return _buffer_region; }
  void set_buffer_region(MemRegion r) { _buffer_region = r; }

  // Raw start address and byte size of the buffer region.
  char* buffer_start() { return (char*)_buffer_region.start(); }
  size_t buffer_byte_size() { return _buffer_region.byte_size(); }

  // Bitmaps recording the positions of embedded oops and of native pointers
  // inside the buffer (filled in by ArchiveHeapWriter).
  CHeapBitMap* oopmap() { return &_oopmap; }
  CHeapBitMap* ptrmap() { return &_ptrmap; }

  // Offset of the copy of the heap roots within the buffer.
  void set_heap_roots_offset(size_t n) { _heap_roots_offset = n; }
  size_t heap_roots_offset() const { return _heap_roots_offset; }
};
63
64 #if INCLUDE_CDS_JAVA_HEAP
class ArchiveHeapWriter : AllStatic {
  friend class HeapShared;
  // ArchiveHeapWriter manipulates three types of addresses:
  //
  //     "source" vs "buffered" vs "requested"
  //
  // (Note: the design and convention is the same as for the archiving of Metaspace objects.
  // See archiveBuilder.hpp.)
  //
  // - "source objects" are regular Java objects allocated during the execution
  //   of "java -Xshare:dump". They can be used as regular oops.
  //
  //   HeapShared::archive_objects() recursively searches for the oops that need to be
  //   stored into the CDS archive. These are entered into HeapShared::archived_object_cache().
  //
  // - "buffered objects" are copies of the "source objects", and are stored into
  //   ArchiveHeapWriter::_buffer, which is a GrowableArray that sits outside of
  //   the valid heap range. Therefore we avoid using the addresses of these copies
  //   as oops. They are usually called "buffered_addr" in the code (of the type "address").
  //
  //   The buffered objects are stored contiguously, possibly with interleaving fillers
  //   to make sure no objects span across boundaries of MIN_GC_REGION_ALIGNMENT.
  //
  // NOTE(review): source lines are missing from this chunk here; the next
  // comment continues mid-sentence -- confirm the full text against the
  // complete file.
  //
  // ArchiveHeapLoader::can_map() mode. Currently only G1 is supported. G1's region size
  // depends on -Xmx, but can never be smaller than 1 * M.
  // (TODO: Perhaps change to 256K to be compatible with Shenandoah)
  static constexpr int MIN_GC_REGION_ALIGNMENT = 1 * M;

  // Backing store for the buffered objects.
  static GrowableArrayCHeap<u1, mtClassShared>* _buffer;

  // The number of bytes that have been written into _buffer (may be smaller than _buffer->length()).
  static size_t _buffer_used;

  // The bottom of the copy of HeapShared::roots() inside this->_buffer.
  static size_t _heap_roots_offset;
  static size_t _heap_roots_word_size;

  // The address range of the requested location of the archived heap objects.
  static address _requested_bottom;
  static address _requested_top;

  // Locations of native pointers embedded in source objects (registered via mark_native_pointer()).
  static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
  // All source objects registered via add_source_obj(), in registration order.
  static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;
  // The "permanent objects" -- see the comment block above PERMOBJ_SEGMENT_MAX_SHIFT below.
  static GrowableArrayCHeap<oop, mtClassShared>* _perm_objs;

  // We sort _source_objs_order to minimize the number of bits in ptrmap and oopmap.
  // See comments near the body of ArchiveHeapWriter::compare_objs_by_oop_fields().
  // The objects will be written in the order of:
  //     _source_objs->at(_source_objs_order->at(0)._index)
  //     _source_objs->at(_source_objs_order->at(1)._index)
  //     _source_objs->at(_source_objs_order->at(2)._index)
  //     ...
  struct HeapObjOrder {
    int _index;   // The location of this object in _source_objs
    int _rank;    // A lower rank means the object will be written at a lower location.
  };
  static GrowableArrayCHeap<HeapObjOrder, mtClassShared>* _source_objs_order;

  // Maps the offset of a buffered object (relative to buffer_bottom()) back to
  // its source oop; see buffered_addr_to_source_obj().
  typedef ResizeableResourceHashtable<size_t, oop,
      AnyObj::C_HEAP,
      mtClassShared> BufferOffsetToSourceObjectTable;
  static BufferOffsetToSourceObjectTable* _buffer_offset_to_source_obj_table;

  static void allocate_buffer();
  // NOTE(review): source lines are missing from this chunk here; the stray
  // closing brace below ends a definition whose beginning is not visible
  // (presumably the offset_to_buffered_address() template used by the
  // functions below) -- confirm against the complete file.
  }

  // The buffered address corresponding to buffer offset 0.
  static address buffer_bottom() {
    return offset_to_buffered_address<address>(0);
  }

  // The exclusive end of the last object that was copied into the buffer.
  static address buffer_top() {
    return buffer_bottom() + _buffer_used;
  }

  // Does buffered_addr point into the used portion of the buffer?
  static bool in_buffer(address buffered_addr) {
    return (buffer_bottom() <= buffered_addr) && (buffered_addr < buffer_top());
  }

  // Inverse of offset_to_buffered_address(): buffered address -> buffer offset.
  static size_t buffered_address_to_offset(address buffered_addr) {
    assert(in_buffer(buffered_addr), "sanity");
    return buffered_addr - buffer_bottom();
  }

  // Copying of the roots, the source objects and the permobj segments into _buffer.
  static size_t create_objarray_in_buffer(GrowableArrayCHeap<oop, mtClassShared>* input, int from,
                                          int num_elms, int extra_length, size_t& objarray_word_size);
  static int copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots, GrowableArray<size_t>* permobj_seg_offsets);
  template <typename T> static void add_permobj_segments_to_roots(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                                                  ArchiveHeapInfo* info, GrowableArray<size_t>* permobj_seg_offsets);
  static void update_stats(oop src_obj);
  static size_t copy_one_source_obj_to_buffer(oop src_obj);

  // Filler management, so that no object crosses a MIN_GC_REGION_ALIGNMENT boundary.
  static void maybe_fill_gc_region_gap(size_t required_byte_size);
  static size_t filler_array_byte_size(int length);
  static int filler_array_length(size_t fill_bytes);
  static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);

  static void set_requested_address(ArchiveHeapInfo* info);
  static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots, ArchiveHeapInfo* info,
                                     GrowableArray<size_t>* permobj_seg_offsets, int num_permobj);
  static void compute_ptrmap(ArchiveHeapInfo *info);
  static bool is_in_requested_range(oop o);
  static oop requested_obj_from_buffer_offset(size_t offset);

  // Raw loads/stores of oop fields located inside the buffer.
  static oop load_oop_from_buffer(oop* buffered_addr);
  static oop load_oop_from_buffer(narrowOop* buffered_addr);
  inline static void store_oop_in_buffer(oop* buffered_addr, oop requested_obj);
  inline static void store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj);

  template <typename T> static oop load_source_oop_from_buffer(T* buffered_addr);
  template <typename T> static void store_requested_oop_in_buffer(T* buffered_addr, oop request_oop);

  template <typename T> static T* requested_addr_to_buffered_addr(T* p);
  template <typename T> static void relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap);
  template <typename T> static void mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap);
  template <typename T> static void relocate_root_at(oop requested_roots, address buffered_roots_addr, int index, CHeapBitMap* oopmap);

  static void update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass);

  // "Permanent Objects"
  //
  // These objects are guaranteed to be in the heap at runtime. The AOT can use
  // HeapShared::get_archived_object_permanent_index() and HeapShared::get_archived_object() to
  // inline these objects into the AOT cache.
  //
  // Currently all archived objects are "permanent". We may want to narrow the scope ....
  //
  // The permobjs are divided into multiple segments, each containing 64K elements (or 4096 in debug builds).
  // This is to avoid overflowing MIN_GC_REGION_ALIGNMENT.
  static constexpr int PERMOBJ_SEGMENT_MAX_SHIFT = DEBUG_ONLY(12) NOT_DEBUG(16);
  static constexpr int PERMOBJ_SEGMENT_MAX_LENGTH = 1 << PERMOBJ_SEGMENT_MAX_SHIFT;
  static constexpr int PERMOBJ_SEGMENT_MAX_MASK = PERMOBJ_SEGMENT_MAX_LENGTH - 1;

  // Ordering used when writing the source objects (see _source_objs_order above).
  static int compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b);
  static void sort_source_objs();
public:
  static void init() NOT_CDS_JAVA_HEAP_RETURN;
  static void add_source_obj(oop src_obj);
  static bool is_too_large_to_archive(size_t size);
  static bool is_too_large_to_archive(oop obj);
  static bool is_string_too_large_to_archive(oop string);
  static void write(GrowableArrayCHeap<oop, mtClassShared>*, ArchiveHeapInfo* heap_info);
  static address requested_address();        // requested address of the lowest archived heap object
  static oop heap_roots_requested_address(); // requested address of HeapShared::roots()
  static address buffered_heap_roots_addr() {
    return offset_to_buffered_address<address>(_heap_roots_offset);
  }
  static size_t heap_roots_word_size() {
    return _heap_roots_word_size;
  }
  static size_t get_filler_size_at(address buffered_addr);
  // Lookup of permobj segments / permanent objects by buffered address or index.
  static int get_permobj_segment_at(address buffered_addr, size_t* byte_size, int* permobj_segment_length);
  static oop get_permobj_source_addr(int permobj_segment, int index);
  static oop get_perm_object_by_index(int permanent_index);

  static void mark_native_pointer(oop src_obj, int offset);
  static bool is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset);
  static oop source_obj_to_requested_obj(oop src_obj);
  static oop buffered_addr_to_source_obj(address buffered_addr);
  static address buffered_addr_to_requested_addr(address buffered_addr);

  // Archived heap object headers carry pre-computed narrow Klass ids calculated with the
  // following scheme:
  // 1) the encoding base must be the mapping start address.
  // 2) shift must be large enough to result in an encoding range that covers the runtime Klass range.
  //    That Klass range is defined by CDS archive size and runtime class space size. Luckily, the maximum
  //    size can be predicted: archive size is assumed to be <1G, class space size capped at 3G, and at
  //    runtime we put both regions adjacent to each other. Therefore, runtime Klass range size < 4G.
  //    Since nKlass itself is 32 bit, our encoding range len is 4G, and since we set the base directly
  //    at mapping start, these 4G are enough. Therefore, we don't need to shift at all (shift=0).
  static constexpr int precomputed_narrow_klass_shift = 0;

};
287 #endif // INCLUDE_CDS_JAVA_HEAP
|