/*
 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/modules.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

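// Terminology used throughout this file:
//
// - source obj:    an object in the current (dump-time) Java heap that has been
//                  selected for archiving.
// - buffered obj:  the copy of a source obj inside _buffer, identified by its
//                  offset from buffer_bottom().
// - requested obj: the address a buffered obj will have if the archived heap is
//                  mapped at [_requested_bottom, _requested_top) at run time.
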
GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t AOTMappedHeapWriter::_buffer_used;

// Heap root segments
HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;

address AOTMappedHeapWriter::_requested_bottom;
address AOTMappedHeapWriter::_requested_top;

static size_t _num_strings = 0;
static size_t _string_bytes = 0;
static size_t _num_packages = 0;
static size_t _num_protection_domains = 0;

GrowableArrayCHeap<AOTMappedHeapWriter::NativePointerInfo, mtClassShared>* AOTMappedHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* AOTMappedHeapWriter::_source_objs;
GrowableArrayCHeap<AOTMappedHeapWriter::HeapObjOrder, mtClassShared>* AOTMappedHeapWriter::_source_objs_order;

AOTMappedHeapWriter::BufferOffsetToSourceObjectTable*
  AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;

DumpedInternedStrings* AOTMappedHeapWriter::_dumped_interned_strings = nullptr;

typedef HashTable<
  size_t, // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
  size_t, // size of this filler (in bytes)
  127,    // prime number
  AnyObj::C_HEAP,
  mtClassShared> FillersTable;
static FillersTable* _fillers;
static int _num_native_ptrs = 0;

void AOTMappedHeapWriter::init() {
  if (CDSConfig::is_dumping_heap()) {
    Universe::heap()->collect(GCCause::_java_lang_system_gc);

    _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
    _dumped_interned_strings = new (mtClass) DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
    _fillers = new FillersTable();
    _requested_bottom = nullptr;
    _requested_top = nullptr;

    _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);

    guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
  }
}

void AOTMappedHeapWriter::delete_tables_with_raw_oops() {
  delete _source_objs;
  _source_objs = nullptr;

  delete _dumped_interned_strings;
  _dumped_interned_strings = nullptr;
}

void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

void AOTMappedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                ArchiveMappedHeapInfo* heap_info) {
  assert(CDSConfig::is_dumping_heap(), "sanity");
  allocate_buffer();
  copy_source_objs_to_buffer(roots);
  set_requested_address(heap_info);
  relocate_embedded_oops(roots, heap_info);
}

bool AOTMappedHeapWriter::is_too_large_to_archive(oop o) {
  return is_too_large_to_archive(o->size());
}

bool AOTMappedHeapWriter::is_string_too_large_to_archive(oop string) {
  typeArrayOop value = java_lang_String::value_no_keepalive(string);
  return is_too_large_to_archive(value);
}

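// An object can be archived only if it fits entirely within one
// MIN_GC_REGION_ALIGNMENT-sized chunk of the buffer: no buffered object may span
// a minimal GC region boundary (see maybe_fill_gc_region_gap()).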
bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) {
  assert(size > 0, "no zero-size object");
  assert(size * HeapWordSize > size, "no overflow");
  static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");

  size_t byte_size = size * HeapWordSize;
  return byte_size > size_t(MIN_GC_REGION_ALIGNMENT);
}

// Keep track of the contents of the archived interned string table. This table
// is used only by CDSHeapVerifier.
void AOTMappedHeapWriter::add_to_dumped_interned_strings(oop string) {
  assert_at_safepoint(); // DumpedInternedStrings uses raw oops
  assert(!is_string_too_large_to_archive(string), "must be");
  bool created;
  _dumped_interned_strings->put_if_absent(string, true, &created);
  if (created) {
    // Prevent string deduplication from changing the value field to
    // something not in the archive.
    java_lang_String::set_deduplication_forbidden(string);
    _dumped_interned_strings->maybe_grow();
  }
}

bool AOTMappedHeapWriter::is_dumped_interned_string(oop o) {
  return _dumped_interned_strings->get(o) != nullptr;
}

// Various lookup functions between source_obj, buffered_obj and requested_obj
bool AOTMappedHeapWriter::is_in_requested_range(oop o) {
  assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
  address a = cast_from_oop<address>(o);
  return (_requested_bottom <= a && a < _requested_top);
}

oop AOTMappedHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
  oop req_obj = cast_to_oop(_requested_bottom + offset);
  assert(is_in_requested_range(req_obj), "must be");
  return req_obj;
}

oop AOTMappedHeapWriter::source_obj_to_requested_obj(oop src_obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
  if (p != nullptr) {
    return requested_obj_from_buffer_offset(p->buffer_offset());
  } else {
    return nullptr;
  }
}

oop AOTMappedHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
  OopHandle* oh = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
  if (oh != nullptr) {
    return oh->resolve();
  } else {
    return nullptr;
  }
}

Klass* AOTMappedHeapWriter::real_klass_of_buffered_oop(address buffered_addr) {
  oop p = buffered_addr_to_source_obj(buffered_addr);
  if (p != nullptr) {
    return p->klass();
  } else if (get_filler_size_at(buffered_addr) > 0) {
    return Universe::fillerArrayKlass();
  } else {
    // This is one of the root segments
    return Universe::objectArrayKlass();
  }
}

size_t AOTMappedHeapWriter::size_of_buffered_oop(address buffered_addr) {
  oop p = buffered_addr_to_source_obj(buffered_addr);
  if (p != nullptr) {
    return p->size();
  }

  size_t nbytes = get_filler_size_at(buffered_addr);
  if (nbytes > 0) {
    assert((nbytes % BytesPerWord) == 0, "should be aligned");
    return nbytes / BytesPerWord;
  }

  address hrs = buffer_bottom();
  for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
    nbytes = _heap_root_segments.size_in_bytes(seg_idx);
    if (hrs == buffered_addr) {
      assert((nbytes % BytesPerWord) == 0, "should be aligned");
      return nbytes / BytesPerWord;
    }
    hrs += nbytes;
  }

  ShouldNotReachHere();
  return 0;
}

address AOTMappedHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
  return _requested_bottom + buffered_address_to_offset(buffered_addr);
}

address AOTMappedHeapWriter::requested_address() {
  assert(_buffer != nullptr, "must be initialized");
  return _requested_bottom;
}

void AOTMappedHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

void AOTMappedHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // We usually have very small heaps. If we get a huge one, it's probably caused by a bug.
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

objArrayOop AOTMappedHeapWriter::allocate_root_segment(size_t offset, int element_count) {
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(offset);
  // object_size() is in HeapWords; memset takes bytes.
  memset(mem, 0, objArrayOopDesc::object_size(element_count) * HeapWordSize);

  // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
  }
  arrayOopDesc::set_length(mem, element_count);
  return objArrayOop(cast_to_oop(mem));
}

void AOTMappedHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
  // Do not use arrayOop->obj_at_put(i, o), as arrayOop is outside the real heap!
  if (UseCompressedOops) {
    *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
  } else {
    *segment->obj_at_addr<oop>(index) = root;
  }
}

void AOTMappedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // Depending on the number of classes we are archiving, a single roots array may be
  // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
  // allows us to chop the large array into a series of "segments". The current layout
  // starts with zero or more segments that exactly fit MIN_GC_REGION_ALIGNMENT, and ends
  // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
  // This is simple and efficient. We do not need filler objects anywhere between the segments,
  // or immediately after the last segment. This allows starting the object dump immediately
  // after the roots.
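  //
  // Resulting buffer layout, with R = MIN_GC_REGION_ALIGNMENT:
  //
  //   offset:  0           R           2R    ...   n*R
  //            | segment 0 | segment 1 | ... | last segment | archived objects ...
  //
  // Every segment except possibly the last is exactly R bytes.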

  assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
         "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);

  int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
  assert(objArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
         "Should match exactly");

  HeapRootSegments segments(_buffer_used,
                            roots->length(),
                            MIN_GC_REGION_ALIGNMENT,
                            max_elem_count);

  int root_index = 0;
  for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
    int size_elems = segments.size_in_elems(seg_idx);
    size_t size_bytes = segments.size_in_bytes(seg_idx);

    size_t oop_offset = _buffer_used;
    _buffer_used = oop_offset + size_bytes;
    ensure_buffer_space(_buffer_used);

    assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
           "Roots segment %zu start is not aligned: %zu",
           seg_idx, oop_offset);

    objArrayOop seg_oop = allocate_root_segment(oop_offset, size_elems);
    for (int i = 0; i < size_elems; i++) {
      root_segment_at_put(seg_oop, i, roots->at(root_index++));
    }

    log_info(aot, heap)("archived obj root segment [%d] = %zu bytes, obj = " PTR_FORMAT,
                        size_elems, size_bytes, p2i(seg_oop));
  }

  assert(root_index == roots->length(), "Post-condition: All roots are handled");

  _heap_root_segments = segments;
}

// The goal is to sort the objects in increasing order of:
// - objects that have only oop pointers
// - objects that have both native and oop pointers
// - objects that have only native pointers
// - objects that have no pointers
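// The rank returned below encodes this order: 0 = only oop pointers,
// 1 = both oop and native pointers, 2 = only native pointers, 3 = no pointers.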
static int oop_sorting_rank(oop o) {
  bool has_oop_ptr, has_native_ptr;
  HeapShared::get_pointer_info(o, has_oop_ptr, has_native_ptr);

  if (has_oop_ptr) {
    if (!has_native_ptr) {
      return 0;
    } else {
      return 1;
    }
  } else {
    if (has_native_ptr) {
      return 2;
    } else {
      return 3;
    }
  }
}

int AOTMappedHeapWriter::compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b) {
  int rank_a = a->_rank;
  int rank_b = b->_rank;

  if (rank_a != rank_b) {
    return rank_a - rank_b;
  } else {
    // If they are the same rank, sort them by their position in the _source_objs array
    return a->_index - b->_index;
  }
}

void AOTMappedHeapWriter::sort_source_objs() {
  log_info(aot)("sorting heap objects");
  int len = _source_objs->length();
  _source_objs_order = new GrowableArrayCHeap<HeapObjOrder, mtClassShared>(len);

  for (int i = 0; i < len; i++) {
    oop o = _source_objs->at(i);
    int rank = oop_sorting_rank(o);
    HeapObjOrder os = {i, rank};
    _source_objs_order->append(os);
  }
  log_info(aot)("computed ranks");
  _source_objs_order->sort(compare_objs_by_oop_fields);
  log_info(aot)("sorting heap objects done");
}

void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // There could be multiple root segments, which we want to be aligned by region.
  // Putting them ahead of objects makes sure we waste no space.
  copy_roots_to_buffer(roots);

  sort_source_objs();
  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);
    assert(buffer_offset <= 0x7fffffff, "sanity");
    HeapShared::add_to_permanent_oop_table(src_obj, (int)buffer_offset);

    OopHandle handle(Universe::vm_global(), src_obj);
    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
    _buffer_offset_to_source_obj_table->maybe_grow();

    if (java_lang_Module::is_instance(src_obj)) {
      Modules::check_archived_module_oop(src_obj);
    }
  }

  log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
                _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
  log_info(cds)("    strings = %8zu (%zu bytes)", _num_strings, _string_bytes);
  log_info(cds)("    packages = %8zu", _num_packages);
  log_info(cds)("    protection domains = %8zu", _num_protection_domains);
}

size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}

int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

HeapWord* AOTMappedHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
  return mem;
}

void AOTMappedHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space of min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
    // we can map the region in any region-based collector.
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
           "no buffered object can be larger than %d bytes", MIN_GC_REGION_ALIGNMENT);

    const size_t filler_end = next_min_region_bottom;
    const size_t fill_bytes = filler_end - _buffer_used;
    assert(fill_bytes > 0, "must be");
    ensure_buffer_space(filler_end);

    int array_length = filler_array_length(fill_bytes);
    log_info(aot, heap)("Inserting filler obj array of %d elements (%zu bytes total) @ buffer offset %zu",
                        array_length, fill_bytes, _buffer_used);
    HeapWord* filler = init_filler_array_at_buffer_top(array_length, fill_bytes);
    _buffer_used = filler_end;
    _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
  }
}

size_t AOTMappedHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

void AOTMappedHeapWriter::update_stats(oop src_obj) {
  if (java_lang_String::is_instance(src_obj)) {
    _num_strings++;
    _string_bytes += src_obj->size() * HeapWordSize;
    _string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
  } else {
    Klass* k = src_obj->klass();
    Symbol* name = k->name();
    if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
      _num_packages++;
    } else if (name->equals("java/security/ProtectionDomain")) {
      _num_protection_domains++;
    }
  }
}

size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  update_stats(src_obj);

  assert(!is_too_large_to_archive(src_obj), "already checked");
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that we don't have an object that could
  // possibly span two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, byte_size);

  // These native pointers will be restored explicitly at run time.
  if (java_lang_Module::is_instance(src_obj)) {
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

void AOTMappedHeapWriter::set_requested_address(ArchiveMappedHeapInfo* info) {
  assert(!info->is_used(), "only set once");

  size_t heap_region_byte_size = _buffer_used;
  assert(heap_region_byte_size > 0, "must have archived at least one object!");

  if (UseCompressedOops) {
    if (UseG1GC) {
      address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
      log_info(aot, heap)("Heap end = %p", heap_end);
      _requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
      _requested_bottom = align_down(_requested_bottom, MIN_GC_REGION_ALIGNMENT);
      assert(is_aligned(_requested_bottom, G1HeapRegion::GrainBytes), "sanity");
    } else {
      _requested_bottom = align_up(CompressedOops::begin(), MIN_GC_REGION_ALIGNMENT);
    }
  } else {
    // We always write the objects as if the heap started at this address. This
    // makes the contents of the archive heap deterministic.
    //
    // Note that at runtime, the heap address is selected by the OS, so the archive
    // heap will not be mapped at 0x10000000, and the contents need to be patched.
    _requested_bottom = align_up((address)NOCOOPS_REQUESTED_BASE, MIN_GC_REGION_ALIGNMENT);
  }

  assert(is_aligned(_requested_bottom, MIN_GC_REGION_ALIGNMENT), "sanity");

  _requested_top = _requested_bottom + _buffer_used;

  info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
                                    offset_to_buffered_address<HeapWord*>(_buffer_used)));
  info->set_root_segments(_heap_root_segments);
}

// Oop relocation
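//
// The helpers below translate pointers between the requested and buffered address
// spaces and read/write oop values directly in the buffer. Buffered objects live
// outside the real Java heap, so normal oop accessors (and their GC barriers)
// must not be used on them.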

template <typename T> T* AOTMappedHeapWriter::requested_addr_to_buffered_addr(T* p) {
  assert(is_in_requested_range(cast_to_oop(p)), "must be");

  address addr = address(p);
  assert(addr >= _requested_bottom, "must be");
  size_t offset = addr - _requested_bottom;
  return offset_to_buffered_address<T*>(offset);
}

template <typename T> oop AOTMappedHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
  oop o = load_oop_from_buffer(buffered_addr);
  assert(!in_buffer(cast_from_oop<address>(o)), "must point to source oop");
  return o;
}

template <typename T> void AOTMappedHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
                                                                              oop request_oop) {
  assert(request_oop == nullptr || is_in_requested_range(request_oop), "must be");
  store_oop_in_buffer(buffered_addr, request_oop);
}

inline void AOTMappedHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
  *buffered_addr = requested_obj;
}

inline void AOTMappedHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
  narrowOop val = CompressedOops::encode(requested_obj);
  *buffered_addr = val;
}

oop AOTMappedHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
  return *buffered_addr;
}

oop AOTMappedHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
  return CompressedOops::decode(*buffered_addr);
}

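// Rewrite one oop field in the buffer so that it refers to the requested address
// of its referent, and record the field's position in the oopmap. At run time the
// field can then be patched if the archived heap is mapped at a different address.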
template <typename T> void AOTMappedHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, oop source_referent, CHeapBitMap* oopmap) {
  oop request_referent = source_obj_to_requested_obj(source_referent);
  store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
  if (request_referent != nullptr) {
    mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
  }
}

template <typename T> void AOTMappedHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
  address requested_region_bottom;

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p < (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

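// Make the header of the buffered copy look like that of a regular heap object when
// viewed at its requested address: install the requested narrowKlass, preserve the
// identity hash of the source object, and clear the GC age bits.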
void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
  } else {
    fake_oop->set_narrow_klass(nk);
  }

  if (src_obj == nullptr) {
    return;
  }
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!src_obj->fast_no_hash_check()) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
  // Strip age bits.
  fake_oop->set_mark(fake_oop->mark().set_age(0));
}

class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;
  bool _is_java_lang_ref;
public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
    _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
  {
    _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(src_obj);
  }

  void do_oop(narrowOop* p) { EmbeddedOopRelocator::do_oop_work(p); }
  void do_oop(      oop* p) { EmbeddedOopRelocator::do_oop_work(p); }

private:
  template <class T> void do_oop_work(T* p) {
    int field_offset = pointer_delta_as_int((char*)p, cast_from_oop<char*>(_src_obj));
    T* field_addr = (T*)(_buffered_obj + field_offset);
    oop referent = load_source_oop_from_buffer<T>(field_addr);
    referent = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, referent);
    AOTMappedHeapWriter::relocate_field_in_buffer<T>(field_addr, referent, _oopmap);
  }
};

static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
  // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
  size_t start = bitmap->find_first_set_bit(0);
  size_t end = bitmap->size();
  log_info(aot)("%s = %7zu ... %7zu (%3zu%% ... %3zu%% = %3zu%%)", which,
                start, end,
                start * 100 / total_bits,
                end * 100 / total_bits,
                (end - start) * 100 / total_bits);
}

// Update all oop fields embedded in the buffered objects
void AOTMappedHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                                 ArchiveMappedHeapInfo* heap_info) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);

  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    oop requested_obj = requested_obj_from_buffer_offset(info->buffer_offset());
    update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
    address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
    EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
    src_obj->oop_iterate(&relocator);
    mark_native_pointers(src_obj);
  }

  // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
  // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
  for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
    size_t seg_offset = _heap_root_segments.segment_offset(seg_idx);

    objArrayOop requested_obj = (objArrayOop)requested_obj_from_buffer_offset(seg_offset);
    update_header_for_requested_obj(requested_obj, nullptr, Universe::objectArrayKlass());
    address buffered_obj = offset_to_buffered_address<address>(seg_offset);
    int length = _heap_root_segments.size_in_elems(seg_idx);

    size_t elem_size = UseCompressedOops ? sizeof(narrowOop) : sizeof(oop);

    for (int i = 0; i < length; i++) {
      // There is no source object; these are native oops - load, translate and
      // write back.
      size_t elem_offset = objArrayOopDesc::base_offset_in_bytes() + elem_size * i;
      HeapWord* elem_addr = (HeapWord*)(buffered_obj + elem_offset);
      oop obj = NativeAccess<>::oop_load(elem_addr);
      obj = HeapShared::maybe_remap_referent(false /* is_reference_field */, elem_offset, obj);
      if (UseCompressedOops) {
        relocate_field_in_buffer<narrowOop>((narrowOop*)elem_addr, obj, heap_info->oopmap());
      } else {
        relocate_field_in_buffer<oop>((oop*)elem_addr, obj, heap_info->oopmap());
      }
    }
  }

  compute_ptrmap(heap_info);

  size_t total_bytes = (size_t)_buffer->length();
  log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
  log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
}

void AOTMappedHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
  Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
  if (ptr != nullptr) {
    NativePointerInfo info;
    info._src_obj = src_obj;
    info._field_offset = field_offset;
    _native_pointers->append(info);
    HeapShared::set_has_native_pointers(src_obj);
    _num_native_ptrs++;
  }
}

void AOTMappedHeapWriter::mark_native_pointers(oop orig_obj) {
  HeapShared::do_metadata_offsets(orig_obj, [&](int offset) {
    mark_native_pointer(orig_obj, offset);
  });
}

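// Build the ptrmap: one bit per Metadata*-sized slot in the requested region. A set
// bit means the slot contains a non-null native pointer to archived metadata, which
// must be patched at run time if the RO/RW regions are not mapped at their requested
// (default) address.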
void AOTMappedHeapWriter::compute_ptrmap(ArchiveMappedHeapInfo* heap_info) {
  int num_non_null_ptrs = 0;
  Metadata** bottom = (Metadata**)_requested_bottom;
  Metadata** top = (Metadata**)_requested_top; // exclusive
  heap_info->ptrmap()->resize(top - bottom);

  BitMap::idx_t max_idx = 32; // paranoid - don't make it too small
  for (int i = 0; i < _native_pointers->length(); i++) {
    NativePointerInfo info = _native_pointers->at(i);
    oop src_obj = info._src_obj;
    int field_offset = info._field_offset;
    HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
    // requested_field_addr = the address of this field in the requested space
    oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
    Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
    assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");

    // Mark this field in the bitmap
    BitMap::idx_t idx = requested_field_addr - bottom;
    heap_info->ptrmap()->set_bit(idx);
    num_non_null_ptrs++;
    max_idx = MAX2(max_idx, idx);

    // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
    // this address if the RO/RW regions are mapped at the default location).

    Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
    Metadata* native_ptr = *buffered_field_addr;
    guarantee(native_ptr != nullptr, "sanity");

    // If this metadata object has been regenerated, the archived copy is the
    // regenerated version, so remap before checking that it has been archived.
    if (RegeneratedClasses::has_been_regenerated(native_ptr)) {
      native_ptr = RegeneratedClasses::get_regenerated_object(native_ptr);
    }

    guarantee(ArchiveBuilder::current()->has_been_archived((address)native_ptr),
              "Metadata %p should have been archived", native_ptr);

    address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
    address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
    *buffered_field_addr = (Metadata*)requested_native_ptr;
  }

  heap_info->ptrmap()->resize(max_idx + 1);
  log_info(aot, heap)("compute_ptrmap: marked %d non-null native pointers for heap region (%zu bits)",
                      num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}

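// Produces an iterator that walks every buffered object (root segments, archived
// objects, and fillers) and describes each one for the AOT map log.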
AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHeapInfo* heap_info) {
  class MappedWriterOopIterator : public AOTMapLogger::OopDataIterator {
  private:
    address _current;
    address _next;

    address _buffer_start;
    address _buffer_end;
    uint64_t _buffer_start_narrow_oop;
    intptr_t _buffer_to_requested_delta;
    int _requested_shift;

    size_t _num_root_segments;
    size_t _num_obj_arrays_logged;

  public:
    MappedWriterOopIterator(address buffer_start,
                            address buffer_end,
                            uint64_t buffer_start_narrow_oop,
                            intptr_t buffer_to_requested_delta,
                            int requested_shift,
                            size_t num_root_segments)
      : _current(nullptr),
        _next(buffer_start),
        _buffer_start(buffer_start),
        _buffer_end(buffer_end),
        _buffer_start_narrow_oop(buffer_start_narrow_oop),
        _buffer_to_requested_delta(buffer_to_requested_delta),
        _requested_shift(requested_shift),
        _num_root_segments(num_root_segments),
        _num_obj_arrays_logged(0) {
    }

    AOTMapLogger::OopData capture(address buffered_addr) {
      oopDesc* raw_oop = (oopDesc*)buffered_addr;
      size_t size = size_of_buffered_oop(buffered_addr);
      address requested_addr = buffered_addr_to_requested_addr(buffered_addr);
      intptr_t target_location = (intptr_t)requested_addr;
      uint64_t pd = (uint64_t)(pointer_delta(buffered_addr, _buffer_start, 1));
      uint32_t narrow_location = checked_cast<uint32_t>(_buffer_start_narrow_oop + (pd >> _requested_shift));
      Klass* klass = real_klass_of_buffered_oop(buffered_addr);

      return { buffered_addr,
               requested_addr,
               target_location,
               narrow_location,
               raw_oop,
               klass,
               size,
               false };
    }

    bool has_next() override {
      return _next < _buffer_end;
    }

    AOTMapLogger::OopData next() override {
      _current = _next;
      AOTMapLogger::OopData result = capture(_current);
      if (result._klass->is_objArray_klass()) {
        result._is_root_segment = _num_obj_arrays_logged++ < _num_root_segments;
      }
      _next = _current + result._size * BytesPerWord;
      return result;
    }

    AOTMapLogger::OopData obj_at(narrowOop* addr) override {
      uint64_t n = (uint64_t)(*addr);
      if (n == 0) {
        return null_data();
      } else {
        precond(n >= _buffer_start_narrow_oop);
        address buffer_addr = _buffer_start + ((n - _buffer_start_narrow_oop) << _requested_shift);
        return capture(buffer_addr);
      }
    }

    AOTMapLogger::OopData obj_at(oop* addr) override {
      address requested_value = cast_from_oop<address>(*addr);
      if (requested_value == nullptr) {
        return null_data();
      } else {
        address buffer_addr = requested_value - _buffer_to_requested_delta;
        return capture(buffer_addr);
      }
    }

    GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
      return new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();
    }
  };

  MemRegion r = heap_info->buffer_region();
  address buffer_start = address(r.start());
  address buffer_end = address(r.end());

  address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
  address requested_start = UseCompressedOops ? buffered_addr_to_requested_addr(buffer_start) : requested_base;
  int requested_shift = CompressedOops::shift();
  intptr_t buffer_to_requested_delta = requested_start - buffer_start;
  uint64_t buffer_start_narrow_oop = 0xdeadbeef; // sentinel; meaningful only if UseCompressedOops
  if (UseCompressedOops) {
    buffer_start_narrow_oop = (uint64_t)(pointer_delta(requested_start, requested_base, 1)) >> requested_shift;
    assert(buffer_start_narrow_oop < 0xffffffff, "sanity");
  }

  return new MappedWriterOopIterator(buffer_start,
                                     buffer_end,
                                     buffer_start_narrow_oop,
                                     buffer_to_requested_delta,
                                     requested_shift,
                                     heap_info->root_segments().count());
}

#endif // INCLUDE_CDS_JAVA_HEAP