/*
 * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotMappedHeap.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/modules.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;

bool AOTMappedHeapWriter::_is_writing_deterministic_heap = false;
size_t AOTMappedHeapWriter::_buffer_used;

// Heap root segments
HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;

address AOTMappedHeapWriter::_requested_bottom;
address AOTMappedHeapWriter::_requested_top;

static size_t _num_strings = 0;
static size_t _string_bytes = 0;
static size_t _num_packages = 0;
static size_t _num_protection_domains = 0;

GrowableArrayCHeap<AOTMappedHeapWriter::NativePointerInfo, mtClassShared>* AOTMappedHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* AOTMappedHeapWriter::_source_objs;
GrowableArrayCHeap<AOTMappedHeapWriter::HeapObjOrder, mtClassShared>* AOTMappedHeapWriter::_source_objs_order;

AOTMappedHeapWriter::BufferOffsetToSourceObjectTable*
  AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;

typedef HashTable<
  size_t,    // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
  size_t,    // size of this filler (in bytes)
  127,       // prime number
  AnyObj::C_HEAP,
  mtClassShared> FillersTable;
static FillersTable* _fillers;
static int _num_native_ptrs = 0;

void AOTMappedHeapWriter::init() {
  if (CDSConfig::is_dumping_heap()) {
    Universe::heap()->collect(GCCause::_java_lang_system_gc);

    _buffer_offset_to_source_obj_table = new (mtClassShared) BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
    _fillers = new (mtClassShared) FillersTable();
    _requested_bottom = nullptr;
    _requested_top = nullptr;

    _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);

    guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");

    if (CDSConfig::old_cds_flags_used()) {
      // With the old CDS workflow, we can guarantee deterministic output: given
      // the same classlist file, we can generate the same static CDS archive.
      // To ensure determinism, we always use the same compressed oop encoding
      // (zero-based, no shift). See set_requested_address_range().
      _is_writing_deterministic_heap = true;
    } else {
      // Deterministic output is not supported by the new AOT workflow, so
      // we don't force the (zero-based, no shift) encoding. This way, it is more
      // likely that we can avoid oop relocation in the production run.
      _is_writing_deterministic_heap = false;
    }
  }
}

// For AOTMappedHeapWriter::narrow_oop_{mode, base, shift}(), see the comments
// in AOTMappedHeapWriter::set_requested_address_range().
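//
// With the deterministic (zero-based, zero-shift) encoding, a narrowOop is
// simply the low 32 bits of an oop's requested address, e.g.:
//
//   narrowOop n = CompressedOops::narrow_oop_cast(cast_from_oop<intptr_t>(requested_oop));
//
// (see relocate_field_in_buffer() below).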
CompressedOops::Mode AOTMappedHeapWriter::narrow_oop_mode() {
  if (is_writing_deterministic_heap()) {
    return CompressedOops::UnscaledNarrowOop;
  } else {
    return CompressedOops::mode();
  }
}

address AOTMappedHeapWriter::narrow_oop_base() {
  if (is_writing_deterministic_heap()) {
    return nullptr;
  } else {
    return CompressedOops::base();
  }
}

int AOTMappedHeapWriter::narrow_oop_shift() {
  if (is_writing_deterministic_heap()) {
    return 0;
  } else {
    return CompressedOops::shift();
  }
}

void AOTMappedHeapWriter::delete_tables_with_raw_oops() {
  delete _source_objs;
  _source_objs = nullptr;
}

void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

void AOTMappedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                AOTMappedHeapInfo* heap_info) {
  assert(CDSConfig::is_dumping_heap(), "sanity");
  allocate_buffer();
  copy_source_objs_to_buffer(roots);
  set_requested_address_range(heap_info);
  relocate_embedded_oops(roots, heap_info);
}

bool AOTMappedHeapWriter::is_too_large_to_archive(oop o) {
  size_t size = o->size();
  size = o->copy_size_cds(size, o->mark());
  return is_too_large_to_archive(size);
}

bool AOTMappedHeapWriter::is_string_too_large_to_archive(oop string) {
  typeArrayOop value = java_lang_String::value_no_keepalive(string);
  return is_too_large_to_archive(value);
}

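// An object can be archived only if it fits entirely within one minimal GC
// region, because maybe_fill_gc_region_gap() guarantees that no buffered
// object crosses a MIN_GC_REGION_ALIGNMENT boundary. For example, assuming
// MIN_GC_REGION_ALIGNMENT == 1 MB, an array whose size is 1 MB + 8 bytes
// cannot be archived.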
bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) {
  assert(size > 0, "no zero-size object");
  assert(size * HeapWordSize > size, "no overflow");
  static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");

  size_t byte_size = size * HeapWordSize;
  if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
    return true;
  } else {
    return false;
  }
}

// Various lookup functions between source_obj, buffered_obj and requested_obj
bool AOTMappedHeapWriter::is_in_requested_range(oop o) {
  assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
  address a = cast_from_oop<address>(o);
  return (_requested_bottom <= a && a < _requested_top);
}

oop AOTMappedHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
  oop req_obj = cast_to_oop(_requested_bottom + offset);
  assert(is_in_requested_range(req_obj), "must be");
  return req_obj;
}

oop AOTMappedHeapWriter::source_obj_to_requested_obj(oop src_obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
  if (p != nullptr) {
    return requested_obj_from_buffer_offset(p->buffer_offset());
  } else {
    return nullptr;
  }
}

oop AOTMappedHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
  OopHandle* oh = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
  if (oh != nullptr) {
    return oh->resolve();
  } else {
    return nullptr;
  }
}

Klass* AOTMappedHeapWriter::real_klass_of_buffered_oop(address buffered_addr) {
  oop p = buffered_addr_to_source_obj(buffered_addr);
  if (p != nullptr) {
    return p->klass();
  } else if (get_filler_size_at(buffered_addr) > 0) {
    return Universe::fillerArrayKlass();
  } else {
    // This is one of the root segments
    return Universe::objectArrayKlass();
  }
}

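// Returns the size (in HeapWords) of the object at buffered_addr, which is
// one of:
// (a) the buffered copy of a source object,
// (b) a filler array inserted by maybe_fill_gc_region_gap(), or
// (c) one of the heap root segments (which have no source object).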
size_t AOTMappedHeapWriter::size_of_buffered_oop(address buffered_addr) {
  oop p = buffered_addr_to_source_obj(buffered_addr);
  if (p != nullptr) {
    if (UseCompactObjectHeaders) {
      // Use the buffered object's mark word to determine size, not the source
      // object's. The source object's mark word may have changed after the
      // buffer was written (e.g., it may have been hashed by
      // make_archived_object_cache_gc_safe), which would cause copy_size_cds
      // to return a different size than what was actually allocated.
      // The buffered copy's mark word was set by update_header_for_requested_obj
      // and correctly reflects the allocated size via its expanded/hash state.
      oop buffered_oop = cast_to_oop(buffered_addr);
      markWord buffered_mark = buffered_oop->mark();
      return buffered_oop->size_given_mark_and_klass(buffered_mark, p->klass());
    }
    return p->size();
  }

  size_t nbytes = get_filler_size_at(buffered_addr);
  if (nbytes > 0) {
    assert((nbytes % BytesPerWord) == 0, "should be aligned");
    return nbytes / BytesPerWord;
  }

  address hrs = buffer_bottom();
  for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
    nbytes = _heap_root_segments.size_in_bytes(seg_idx);
    if (hrs == buffered_addr) {
      assert((nbytes % BytesPerWord) == 0, "should be aligned");
      return nbytes / BytesPerWord;
    }
    hrs += nbytes;
  }

  ShouldNotReachHere();
  return 0;
}

address AOTMappedHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
  return _requested_bottom + buffered_address_to_offset(buffered_addr);
}

address AOTMappedHeapWriter::requested_address() {
  assert(_buffer != nullptr, "must be initialized");
  return _requested_bottom;
}

void AOTMappedHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

void AOTMappedHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // We usually have very small heaps. If we get a huge one, it's probably caused by a bug.
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

objArrayOop AOTMappedHeapWriter::allocate_root_segment(size_t offset, int element_count) {
  HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
  memset(mem, 0, objArrayOopDesc::object_size(element_count));

  // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
  }
  arrayOopDesc::set_length(mem, element_count);
  return objArrayOop(cast_to_oop(mem));
}

void AOTMappedHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
  // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
  if (UseCompressedOops) {
    *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
  } else {
    *segment->obj_at_addr<oop>(index) = root;
  }
}

void AOTMappedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // Depending on the number of classes we are archiving, a single roots array may be
  // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
  // allows us to chop the large array into a series of "segments". The current layout
  // starts with zero or more segments that exactly fit MIN_GC_REGION_ALIGNMENT, and ends
  // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
  // This is simple and efficient. We do not need filler objects anywhere between the segments,
  // or immediately after the last segment. This allows starting the object dump immediately
  // after the roots.
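  //
  // For example, assuming MIN_GC_REGION_ALIGNMENT == 1 MB, UseCompressedOops
  // (heapOopSize == 4) and a 16-byte array header, a full segment holds
  // (1048576 - 16) / 4 == 262140 roots, so 500000 roots would be split into
  // one full 1 MB segment followed by a trailing segment of 237860 elements.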

  assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
         "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);

  int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
  assert(objArrayOopDesc::object_size(max_elem_count) * HeapWordSize == MIN_GC_REGION_ALIGNMENT,
         "Should match exactly");

  HeapRootSegments segments(_buffer_used,
                            roots->length(),
                            MIN_GC_REGION_ALIGNMENT,
                            max_elem_count);

  int root_index = 0;
  for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
    int size_elems = segments.size_in_elems(seg_idx);
    size_t size_bytes = segments.size_in_bytes(seg_idx);

    size_t oop_offset = _buffer_used;
    _buffer_used = oop_offset + size_bytes;
    ensure_buffer_space(_buffer_used);

    assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
           "Roots segment %zu start is not aligned: %zu",
           seg_idx, oop_offset);

    objArrayOop seg_oop = allocate_root_segment(oop_offset, size_elems);
    for (int i = 0; i < size_elems; i++) {
      root_segment_at_put(seg_oop, i, roots->at(root_index++));
    }

    log_info(aot, heap)("archived obj root segment [%d] = %zu bytes, obj = " PTR_FORMAT,
                        size_elems, size_bytes, p2i(seg_oop));
  }

  assert(root_index == roots->length(), "Post-condition: All roots are handled");

  _heap_root_segments = segments;
}

// The goal is to sort the objects in increasing order of:
// - objects that have only oop pointers
// - objects that have both native and oop pointers
// - objects that have only native pointers
// - objects that have no pointers
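//
// This clustering keeps the objects with oop fields (and thus the set bits of
// the oopmap) together near the bottom of the region, and likewise for objects
// with native pointers, which lets compute_ptrmap() truncate the ptrmap (see
// the resize(max_idx + 1) call there) and keeps the set bits of both bitmaps
// in a compact range (see log_bitmap_usage()).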
static int oop_sorting_rank(oop o) {
  bool has_oop_ptr, has_native_ptr;
  HeapShared::get_pointer_info(o, has_oop_ptr, has_native_ptr);

  if (has_oop_ptr) {
    if (!has_native_ptr) {
      return 0;
    } else {
      return 1;
    }
  } else {
    if (has_native_ptr) {
      return 2;
    } else {
      return 3;
    }
  }
}

int AOTMappedHeapWriter::compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b) {
  int rank_a = a->_rank;
  int rank_b = b->_rank;

  if (rank_a != rank_b) {
    return rank_a - rank_b;
  } else {
    // If they are the same rank, sort them by their position in the _source_objs array
    return a->_index - b->_index;
  }
}

void AOTMappedHeapWriter::sort_source_objs() {
  log_info(aot)("sorting heap objects");
  int len = _source_objs->length();
  _source_objs_order = new GrowableArrayCHeap<HeapObjOrder, mtClassShared>(len);

  for (int i = 0; i < len; i++) {
    oop o = _source_objs->at(i);
    int rank = oop_sorting_rank(o);
    HeapObjOrder os = {i, rank};
    _source_objs_order->append(os);
  }
  log_info(aot)("computed ranks");
  _source_objs_order->sort(compare_objs_by_oop_fields);
  log_info(aot)("sorting heap objects done");
}

void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // There could be multiple root segments, which we want to be aligned to region
  // boundaries. Putting them ahead of all other objects ensures we waste no space.
  copy_roots_to_buffer(roots);

  sort_source_objs();
  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);
    assert(buffer_offset <= 0x7fffffff, "sanity");

    OopHandle handle(Universe::vm_global(), src_obj);
    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
    _buffer_offset_to_source_obj_table->maybe_grow();

    if (java_lang_Module::is_instance(src_obj)) {
      Modules::check_archived_module_oop(src_obj);
    }
  }

  log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
                _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
  log_info(aot)("    strings            = %8zu (%zu bytes)", _num_strings, _string_bytes);
  log_info(aot)("    packages           = %8zu", _num_packages);
  log_info(aot)("    protection domains = %8zu", _num_protection_domains);
}

size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}

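// Find the objArray length whose total object size is exactly fill_bytes.
// Because object_size() rounds up to the heap's object alignment, we start
// from the largest candidate length and search downward for an exact match.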
int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

HeapWord* AOTMappedHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
  return mem;
}

void AOTMappedHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space of min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
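  //
  // For example, assuming MIN_GC_REGION_ALIGNMENT == 1 MB (0x100000) and
  // _buffer_used == 0xff000: allocating a 0x2000-byte object here would make
  // it cross the 0x100000 boundary, so we first insert a 0x1000-byte filler
  // array that pads the buffer up to 0x100000.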
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
    // we can map the region in any region-based collector.
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
           "no buffered object can be larger than %d bytes", MIN_GC_REGION_ALIGNMENT);

    const size_t filler_end = next_min_region_bottom;
    const size_t fill_bytes = filler_end - _buffer_used;
    assert(fill_bytes > 0, "must be");
    ensure_buffer_space(filler_end);

    int array_length = filler_array_length(fill_bytes);
    log_info(aot, heap)("Inserting filler obj array of %d elements (%zu bytes total) @ buffer offset %zu",
                        array_length, fill_bytes, _buffer_used);
    HeapWord* filler = init_filler_array_at_buffer_top(array_length, fill_bytes);
    _buffer_used = filler_end;
    _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
  }
}

size_t AOTMappedHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

void AOTMappedHeapWriter::update_stats(oop src_obj) {
  if (java_lang_String::is_instance(src_obj)) {
    _num_strings++;
    _string_bytes += src_obj->size() * HeapWordSize;
    _string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
  } else {
    Klass* k = src_obj->klass();
    Symbol* name = k->name();
    if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
      _num_packages++;
    } else if (name->equals("java/security/ProtectionDomain")) {
      _num_protection_domains++;
    }
  }
}

size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  update_stats(src_obj);

  assert(!is_too_large_to_archive(src_obj), "already checked");
  size_t old_size = src_obj->size();
  size_t new_size = src_obj->copy_size_cds(old_size, src_obj->mark());
  size_t byte_size = new_size * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that we don't have an object that could
  // possibly span two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, MIN2(new_size, old_size) * HeapWordSize);

  // These native pointers will be restored explicitly at run time.
  if (java_lang_Module::is_instance(src_obj)) {
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

// Set the range [_requested_bottom, _requested_top), the requested address range of all
// the archived heap objects in the production run.
//
// (1) UseCompressedOops == true && !is_writing_deterministic_heap()
//
//     The archived objects are stored using the COOPS encoding of the assembly phase.
//     We pick a range within the heap used by the assembly phase.
//
//     If the production run uses a different COOPS encoding, the heap contents
//     need to be relocated.
//
// (2) UseCompressedOops == true && is_writing_deterministic_heap()
//
//     We always use zero-based, zero-shift encoding. The requested range is placed
//     just below 0x100000000, the upper limit of that encoding, with _requested_bottom
//     aligned to at least 1 MB.
//
// (3) UseCompressedOops == false
//
//     In the production run, the heap range is usually picked (randomly) by the OS, so we
//     will almost always need to perform relocation, regardless of how we pick the requested
//     address range.
//
//     So we just hard-code it to NOCOOPS_REQUESTED_BASE.
//
void AOTMappedHeapWriter::set_requested_address_range(AOTMappedHeapInfo* info) {
  assert(!info->is_used(), "only set once");

  size_t heap_region_byte_size = _buffer_used;
  assert(heap_region_byte_size > 0, "must archive at least one object!");

  if (UseCompressedOops) {
    if (is_writing_deterministic_heap()) {
      // Pick a heap range so that requested addresses can be encoded with zero-base/no shift.
      // We align the requested bottom to at least 1 MB: if the production run uses G1 with a small
      // heap (e.g., -Xmx256m), it's likely that we can map the archived objects at the
      // requested location to avoid relocation.
      //
      // For other collectors or larger heaps, relocation is unavoidable, but is usually
      // quite cheap. If you really want to avoid relocation, use the AOT workflow instead.
      address heap_end = (address)0x100000000;
      size_t alignment = MAX2(MIN_GC_REGION_ALIGNMENT, 1024 * 1024);
      if (align_up(heap_region_byte_size, alignment) >= (size_t)heap_end) {
        log_error(aot, heap)("cached heap space is too large: %zu bytes", heap_region_byte_size);
        AOTMetaspace::unrecoverable_writing_error();
      }
      _requested_bottom = align_down(heap_end - heap_region_byte_size, alignment);
    } else if (UseG1GC) {
      // For G1, pick the range at the top of the current heap. If the exact same heap sizes
      // are used in the production run, it's likely that we can map the archived objects
      // at the requested location to avoid relocation.
      address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
      log_info(aot, heap)("Heap end = %p", heap_end);
      _requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
      _requested_bottom = align_down(_requested_bottom, MIN_GC_REGION_ALIGNMENT);
      assert(is_aligned(_requested_bottom, G1HeapRegion::GrainBytes), "sanity");
    } else {
      _requested_bottom = align_up(CompressedOops::begin(), MIN_GC_REGION_ALIGNMENT);
    }
  } else {
    // We always write the objects as if the heap started at this address. This
    // makes the contents of the archive heap deterministic.
    //
    // Note that at runtime, the heap address is selected by the OS, so the archive
    // heap will not be mapped at NOCOOPS_REQUESTED_BASE, and the contents need to be patched.
    _requested_bottom = align_up((address)NOCOOPS_REQUESTED_BASE, MIN_GC_REGION_ALIGNMENT);
  }

  assert(is_aligned(_requested_bottom, MIN_GC_REGION_ALIGNMENT), "sanity");

  _requested_top = _requested_bottom + _buffer_used;

  info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
                                    offset_to_buffered_address<HeapWord*>(_buffer_used)));
  info->set_root_segments(_heap_root_segments);
}

// Oop relocation

template <typename T> T* AOTMappedHeapWriter::requested_addr_to_buffered_addr(T* p) {
  assert(is_in_requested_range(cast_to_oop(p)), "must be");

  address addr = address(p);
  assert(addr >= _requested_bottom, "must be");
  size_t offset = addr - _requested_bottom;
  return offset_to_buffered_address<T*>(offset);
}

template <typename T> oop AOTMappedHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
  oop o = load_oop_from_buffer(buffered_addr);
  assert(!in_buffer(cast_from_oop<address>(o)), "must point to source oop");
  return o;
}

template <typename T> void AOTMappedHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
                                                                              oop request_oop) {
  assert(request_oop == nullptr || is_in_requested_range(request_oop), "must be");
  store_oop_in_buffer(buffered_addr, request_oop);
}

inline void AOTMappedHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
  *buffered_addr = requested_obj;
}

inline void AOTMappedHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
  narrowOop val = CompressedOops::encode(requested_obj);
  *buffered_addr = val;
}

oop AOTMappedHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
  return *buffered_addr;
}

oop AOTMappedHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
  return CompressedOops::decode(*buffered_addr);
}

template <typename T> void AOTMappedHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, oop source_referent, CHeapBitMap* oopmap) {
  oop request_referent = source_obj_to_requested_obj(source_referent);
  if (UseCompressedOops && is_writing_deterministic_heap()) {
    // We use zero-based, zero-shift encoding, so the narrowOop is just the lower
    // 32 bits of request_referent.
    intptr_t addr = cast_from_oop<intptr_t>(request_referent);
    *((narrowOop*)field_addr_in_buffer) = CompressedOops::narrow_oop_cast(addr);
  } else {
    store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
  }
  if (request_referent != nullptr) {
    mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
  }
}

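// Record the position of an oop field in the oopmap. The bitmap has one bit
// per potential (narrow)oop slot in the requested region, so the bit index is
// the field's offset from _requested_bottom in units of sizeof(T).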
template <typename T> void AOTMappedHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
  address requested_region_bottom;

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p < (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

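// Set up the mark word and narrow klass of the buffered copy so that the
// object header is valid when mapped at its requested address. src_obj may be
// nullptr for the heap root segments, which have no corresponding source object.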
void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
    assert(fake_oop->mark().narrow_klass() != 0, "must not be null");
  } else {
    fake_oop->set_narrow_klass(nk);
  }

  if (src_obj == nullptr) {
    return;
  }
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!src_obj->fast_no_hash_check()) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      markWord m = markWord::prototype().set_narrow_klass(nk);
      m = m.copy_hashctrl_from(src_obj->mark());
      fake_oop->set_mark(m);
      if (m.is_hashed_not_expanded()) {
        fake_oop->set_mark(fake_oop->initialize_hash_if_necessary(src_obj, src_klass, m));
      } else if (m.is_not_hashed_expanded()) {
        fake_oop->set_mark(m.set_not_hashed_not_expanded());
      }
      assert(!fake_oop->mark().is_not_hashed_expanded() && !fake_oop->mark().is_hashed_not_expanded(),
             "must not be not-hashed-expanded or hashed-not-expanded");
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
      DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
      assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");
  }
  // Strip age bits.
  fake_oop->set_mark(fake_oop->mark().set_age(0));
}

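// Relocates every oop field embedded in one buffered object: loads the source
// referent from the buffered copy, remaps java.lang.ref referents if needed,
// and stores the referent's requested address back into the buffer.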
class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;
  bool _is_java_lang_ref;
public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
    _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
  {
    _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(src_obj);
  }

  void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }

private:
  template <class T> void do_oop_work(T *p) {
    int field_offset = pointer_delta_as_int((char*)p, cast_from_oop<char*>(_src_obj));
    T* field_addr = (T*)(_buffered_obj + field_offset);
    oop referent = load_source_oop_from_buffer<T>(field_addr);
    referent = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, referent);
    AOTMappedHeapWriter::relocate_field_in_buffer<T>(field_addr, referent, _oopmap);
  }
};

static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
  // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
  size_t start = bitmap->find_first_set_bit(0);
  size_t end = bitmap->size();
  log_info(aot)("%s = %7zu ... %7zu (%3zu%% ... %3zu%% = %3zu%%)", which,
                start, end,
                start * 100 / total_bits,
                end * 100 / total_bits,
                (end - start) * 100 / total_bits);
}

// Update all oop fields embedded in the buffered objects
void AOTMappedHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                                 AOTMappedHeapInfo* heap_info) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);

  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    oop requested_obj = requested_obj_from_buffer_offset(info->buffer_offset());
    update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
    address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
    EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
    src_obj->oop_iterate(&relocator);
    mark_native_pointers(src_obj);
  }

  // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
  // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
  for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
    size_t seg_offset = _heap_root_segments.segment_offset(seg_idx);

    objArrayOop requested_obj = (objArrayOop)requested_obj_from_buffer_offset(seg_offset);
    update_header_for_requested_obj(requested_obj, nullptr, Universe::objectArrayKlass());
    address buffered_obj = offset_to_buffered_address<address>(seg_offset);
    int length = _heap_root_segments.size_in_elems(seg_idx);

    size_t elem_size = UseCompressedOops ? sizeof(narrowOop) : sizeof(oop);

    for (int i = 0; i < length; i++) {
      // There is no source object; these are native oops - load, translate and
      // write back
      size_t elem_offset = objArrayOopDesc::base_offset_in_bytes() + elem_size * i;
      HeapWord* elem_addr = (HeapWord*)(buffered_obj + elem_offset);
      oop obj = NativeAccess<>::oop_load(elem_addr);
      obj = HeapShared::maybe_remap_referent(false /* is_reference_field */, elem_offset, obj);
      if (UseCompressedOops) {
        relocate_field_in_buffer<narrowOop>((narrowOop*)elem_addr, obj, heap_info->oopmap());
      } else {
        relocate_field_in_buffer<oop>((oop*)elem_addr, obj, heap_info->oopmap());
      }
    }
  }

  compute_ptrmap(heap_info);

  size_t total_bytes = (size_t)_buffer->length();
  log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
  log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
}

void AOTMappedHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
  Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
  if (ptr != nullptr) {
    NativePointerInfo info;
    info._src_obj = src_obj;
    info._field_offset = field_offset;
    _native_pointers->append(info);
    HeapShared::set_has_native_pointers(src_obj);
    _num_native_ptrs++;
  }
}

void AOTMappedHeapWriter::mark_native_pointers(oop orig_obj) {
  HeapShared::do_metadata_offsets(orig_obj, [&](int offset) {
    mark_native_pointer(orig_obj, offset);
  });
}

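// Compute the ptrmap: one bit per Metadata* slot in the requested range that
// holds a non-null native pointer. Each such slot is also rewritten to hold
// the requested address of its (possibly regenerated) metadata object.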
void AOTMappedHeapWriter::compute_ptrmap(AOTMappedHeapInfo* heap_info) {
  int num_non_null_ptrs = 0;
  Metadata** bottom = (Metadata**) _requested_bottom;
  Metadata** top = (Metadata**) _requested_top; // exclusive
  heap_info->ptrmap()->resize(top - bottom);

  BitMap::idx_t max_idx = 32; // paranoid - don't make it too small
  for (int i = 0; i < _native_pointers->length(); i++) {
    NativePointerInfo info = _native_pointers->at(i);
    oop src_obj = info._src_obj;
    int field_offset = info._field_offset;
    HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
    // requested_field_addr = the address of this field in the requested space
    oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
    Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
    assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");

    // Mark this field in the bitmap
    BitMap::idx_t idx = requested_field_addr - bottom;
    heap_info->ptrmap()->set_bit(idx);
    num_non_null_ptrs++;
    max_idx = MAX2(max_idx, idx);

    // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
    // this address if the RO/RW regions are mapped at the default location).

    Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
    Metadata* native_ptr = *buffered_field_addr;
    guarantee(native_ptr != nullptr, "sanity");

    if (RegeneratedClasses::has_been_regenerated(native_ptr)) {
      native_ptr = RegeneratedClasses::get_regenerated_object(native_ptr);
    }

    if (!ArchiveBuilder::current()->has_been_archived((address)native_ptr)) {
      ResourceMark rm;
      LogStreamHandle(Error, aot) log;
      log.print("Marking native pointer for oop %p (type = %s, offset = %d)",
                cast_from_oop<void*>(src_obj), src_obj->klass()->external_name(), field_offset);
      src_obj->print_on(&log);
      fatal("Metadata %p should have been archived", native_ptr);
    }

    address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
    address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
    *buffered_field_addr = (Metadata*)requested_native_ptr;
  }

  heap_info->ptrmap()->resize(max_idx + 1);
  log_info(aot, heap)("compute_ptrmap: marked %d non-null native pointers for heap region (%zu bits)",
                      num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}

AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(AOTMappedHeapInfo* heap_info) {
  class MappedWriterOopIterator : public AOTMappedHeapOopIterator {
  public:
    MappedWriterOopIterator(address buffer_start,
                            address buffer_end,
                            address requested_base,
                            address requested_start,
                            int requested_shift,
                            size_t num_root_segments) :
      AOTMappedHeapOopIterator(buffer_start,
                               buffer_end,
                               requested_base,
                               requested_start,
                               requested_shift,
                               num_root_segments) {}

    AOTMapLogger::OopData capture(address buffered_addr) override {
      oopDesc* raw_oop = (oopDesc*)buffered_addr;
      size_t size = size_of_buffered_oop(buffered_addr);
      address requested_addr = buffered_addr_to_requested_addr(buffered_addr);
      intptr_t target_location = (intptr_t)requested_addr;
      uint64_t pd = (uint64_t)(pointer_delta(buffered_addr, _buffer_start, 1));
      uint32_t narrow_location = checked_cast<uint32_t>(_buffer_start_narrow_oop + (pd >> _requested_shift));
      Klass* klass = real_klass_of_buffered_oop(buffered_addr);

      return { buffered_addr,
               requested_addr,
               target_location,
               narrow_location,
               raw_oop,
               klass,
               size,
               false };
    }
  };

  MemRegion r = heap_info->buffer_region();
  address buffer_start = address(r.start());
  address buffer_end = address(r.end());

  address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
  address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
  int requested_shift = AOTMappedHeapWriter::narrow_oop_shift();

  return new MappedWriterOopIterator(buffer_start,
                                     buffer_end,
                                     requested_base,
                                     requested_start,
                                     requested_shift,
                                     heap_info->root_segments().count());
}

#endif // INCLUDE_CDS_JAVA_HEAP