1 /*
  2  * Copyright (c) 2023, 2026, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "cds/aotMappedHeap.hpp"
 26 #include "cds/aotMappedHeapWriter.hpp"
 27 #include "cds/aotReferenceObjSupport.hpp"
 28 #include "cds/cdsConfig.hpp"
 29 #include "cds/filemap.hpp"
 30 #include "cds/heapShared.inline.hpp"
 31 #include "cds/regeneratedClasses.hpp"
 32 #include "classfile/javaClasses.hpp"
 33 #include "classfile/modules.hpp"
 34 #include "classfile/systemDictionary.hpp"
 35 #include "gc/shared/collectedHeap.hpp"
 36 #include "memory/allocation.inline.hpp"
 37 #include "memory/iterator.inline.hpp"
 38 #include "memory/oopFactory.hpp"
 39 #include "memory/universe.hpp"
 40 #include "oops/compressedOops.hpp"
 41 #include "oops/objArrayOop.inline.hpp"
 42 #include "oops/oop.inline.hpp"
 43 #include "oops/oopHandle.inline.hpp"
 44 #include "oops/typeArrayKlass.hpp"
 45 #include "oops/typeArrayOop.hpp"
 46 #include "runtime/arguments.hpp"
 47 #include "runtime/java.hpp"
 48 #include "runtime/mutexLocker.hpp"
 49 #include "utilities/bitMap.inline.hpp"
 50 #if INCLUDE_G1GC
 51 #include "gc/g1/g1CollectedHeap.hpp"
 52 #include "gc/g1/g1HeapRegion.hpp"
 53 #endif
 54 
 55 #if INCLUDE_CDS_JAVA_HEAP
 56 
 57 GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;
 58 
 59 bool AOTMappedHeapWriter::_is_writing_deterministic_heap = false;
 60 size_t AOTMappedHeapWriter::_buffer_used;
 61 
 62 // Heap root segments
 63 HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;
 64 
 65 address AOTMappedHeapWriter::_requested_bottom;
 66 address AOTMappedHeapWriter::_requested_top;
 67 
 68 GrowableArrayCHeap<AOTMappedHeapWriter::NativePointerInfo, mtClassShared>* AOTMappedHeapWriter::_native_pointers;
 69 GrowableArrayCHeap<oop, mtClassShared>* AOTMappedHeapWriter::_source_objs;
 70 GrowableArrayCHeap<AOTMappedHeapWriter::HeapObjOrder, mtClassShared>* AOTMappedHeapWriter::_source_objs_order;
 71 
 72 AOTMappedHeapWriter::BufferOffsetToSourceObjectTable*
 73 AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
 74 
 75 DumpedInternedStrings *AOTMappedHeapWriter::_dumped_interned_strings = nullptr;
 76 
 77 typedef HashTable<
 78       size_t,    // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
 79       size_t,    // size of this filler (in bytes)
 80       127,       // prime number
 81       AnyObj::C_HEAP,
 82       mtClassShared> FillersTable;
 83 static FillersTable* _fillers;
 84 static int _num_native_ptrs = 0;
 85 
 86 void AOTMappedHeapWriter::init() {
 87   if (CDSConfig::is_dumping_heap()) {
 88     Universe::heap()->collect(GCCause::_java_lang_system_gc);
 89 
 90     _buffer_offset_to_source_obj_table = new (mtClassShared) BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
    _dumped_interned_strings = new (mtClass) DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
 92     _fillers = new (mtClassShared) FillersTable();
 93     _requested_bottom = nullptr;
 94     _requested_top = nullptr;
 95 
 96     _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
 97     _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);
 98 
 99     guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
100 
101     if (CDSConfig::old_cds_flags_used()) {
      // With the old CDS workflow, we can guarantee deterministic output: given
103       // the same classlist file, we can generate the same static CDS archive.
104       // To ensure determinism, we always use the same compressed oop encoding
105       // (zero-based, no shift). See set_requested_address_range().
106       _is_writing_deterministic_heap = true;
107     } else {
      // Deterministic output is not supported by the new AOT workflow, so
109       // we don't force the (zero-based, no shift) encoding. This way, it is more
110       // likely that we can avoid oop relocation in the production run.
111       _is_writing_deterministic_heap = false;
112     }
113   }
114 }
115 
116 // For AOTMappedHeapWriter::narrow_oop_{mode, base, shift}(), see comments
// in AOTMappedHeapWriter::set_requested_address_range().
118 CompressedOops::Mode AOTMappedHeapWriter::narrow_oop_mode() {
119   if (is_writing_deterministic_heap()) {
120     return CompressedOops::UnscaledNarrowOop;
121   } else {
122     return CompressedOops::mode();
123   }
124 }
125 
126 address AOTMappedHeapWriter::narrow_oop_base() {
127   if (is_writing_deterministic_heap()) {
128     return nullptr;
129   } else {
130     return CompressedOops::base();
131   }
132 }
133 
134 int AOTMappedHeapWriter::narrow_oop_shift() {
135   if (is_writing_deterministic_heap()) {
136     return 0;
137   } else {
138     return CompressedOops::shift();
139   }
140 }
141 
142 void AOTMappedHeapWriter::delete_tables_with_raw_oops() {
143   delete _source_objs;
144   _source_objs = nullptr;
145 
146   delete _dumped_interned_strings;
147   _dumped_interned_strings = nullptr;
148 }
149 
150 void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
151   _source_objs->append(src_obj);
152 }
153 
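// Write the archived heap image: allocate the output buffer, copy the root
// segments and all registered source objects into it, pick the requested
// (production-run) address range, then relocate every embedded oop to point
// into that range.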
154 void AOTMappedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
155                                 AOTMappedHeapInfo* heap_info) {
156   assert(CDSConfig::is_dumping_heap(), "sanity");
157   allocate_buffer();
158   copy_source_objs_to_buffer(roots);
159   set_requested_address_range(heap_info);
160   relocate_embedded_oops(roots, heap_info);
161 }
162 
163 bool AOTMappedHeapWriter::is_too_large_to_archive(oop o) {
164   return is_too_large_to_archive(o->size());
165 }
166 
167 bool AOTMappedHeapWriter::is_string_too_large_to_archive(oop string) {
168   typeArrayOop value = java_lang_String::value_no_keepalive(string);
169   return is_too_large_to_archive(value);
170 }
171 
172 bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) {
173   assert(size > 0, "no zero-size object");
174   assert(size * HeapWordSize > size, "no overflow");
175   static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");
176 
177   size_t byte_size = size * HeapWordSize;
178   if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
179     return true;
180   } else {
181     return false;
182   }
183 }
184 
185 // Keep track of the contents of the archived interned string table. This table
186 // is used only by CDSHeapVerifier.
187 void AOTMappedHeapWriter::add_to_dumped_interned_strings(oop string) {
188   assert_at_safepoint(); // DumpedInternedStrings uses raw oops
189   assert(!is_string_too_large_to_archive(string), "must be");
190   bool created;
191   _dumped_interned_strings->put_if_absent(string, true, &created);
192   if (created) {
193     // Prevent string deduplication from changing the value field to
194     // something not in the archive.
195     java_lang_String::set_deduplication_forbidden(string);
196     _dumped_interned_strings->maybe_grow();
197   }
198 }
199 
200 bool AOTMappedHeapWriter::is_dumped_interned_string(oop o) {
201   return _dumped_interned_strings->get(o) != nullptr;
202 }
203 
204 // Various lookup functions between source_obj, buffered_obj and requested_obj
205 bool AOTMappedHeapWriter::is_in_requested_range(oop o) {
206   assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
207   address a = cast_from_oop<address>(o);
208   return (_requested_bottom <= a && a < _requested_top);
209 }
210 
211 oop AOTMappedHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
212   oop req_obj = cast_to_oop(_requested_bottom + offset);
213   assert(is_in_requested_range(req_obj), "must be");
214   return req_obj;
215 }
216 
217 oop AOTMappedHeapWriter::source_obj_to_requested_obj(oop src_obj) {
218   assert(CDSConfig::is_dumping_heap(), "dump-time only");
219   HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
220   if (p != nullptr) {
221     return requested_obj_from_buffer_offset(p->buffer_offset());
222   } else {
223     return nullptr;
224   }
225 }
226 
227 oop AOTMappedHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
228   OopHandle* oh = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
229   if (oh != nullptr) {
230     return oh->resolve();
231   } else {
232     return nullptr;
233   }
234 }
235 
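// Return the klass that this buffered object will have at run time: the source
// object's klass, fillerArrayKlass for filler arrays, or objectArrayKlass for
// the heap root segments.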
236 Klass* AOTMappedHeapWriter::real_klass_of_buffered_oop(address buffered_addr) {
237   oop p = buffered_addr_to_source_obj(buffered_addr);
238   if (p != nullptr) {
239     return p->klass();
240   } else if (get_filler_size_at(buffered_addr) > 0) {
241     return Universe::fillerArrayKlass();
242   } else {
243     // This is one of the root segments
244     return Universe::objectArrayKlass();
245   }
246 }
247 
248 size_t AOTMappedHeapWriter::size_of_buffered_oop(address buffered_addr) {
249   oop p = buffered_addr_to_source_obj(buffered_addr);
250   if (p != nullptr) {
251     return p->size();
252   }
253 
254   size_t nbytes = get_filler_size_at(buffered_addr);
255   if (nbytes > 0) {
256     assert((nbytes % BytesPerWord) == 0, "should be aligned");
257     return nbytes / BytesPerWord;
258   }
259 
260   address hrs = buffer_bottom();
261   for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
262     nbytes = _heap_root_segments.size_in_bytes(seg_idx);
263     if (hrs == buffered_addr) {
264       assert((nbytes % BytesPerWord) == 0, "should be aligned");
265       return nbytes / BytesPerWord;
266     }
267     hrs += nbytes;
268   }
269 
270   ShouldNotReachHere();
271   return 0;
272 }
273 
274 address AOTMappedHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
275   return _requested_bottom + buffered_address_to_offset(buffered_addr);
276 }
277 
278 address AOTMappedHeapWriter::requested_address() {
279   assert(_buffer != nullptr, "must be initialized");
280   return _requested_bottom;
281 }
282 
283 void AOTMappedHeapWriter::allocate_buffer() {
284   int initial_buffer_size = 100000;
285   _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
286   _buffer_used = 0;
287   ensure_buffer_space(1); // so that buffer_bottom() works
288 }
289 
290 void AOTMappedHeapWriter::ensure_buffer_space(size_t min_bytes) {
291   // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
293   _buffer->at_grow(to_array_index(min_bytes));
294 }
295 
296 objArrayOop AOTMappedHeapWriter::allocate_root_segment(size_t offset, int element_count) {
297   HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
298   memset(mem, 0, refArrayOopDesc::object_size(element_count));
299 
300   // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
301   if (UseCompactObjectHeaders) {
302     oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
303   } else {
304     assert(!Arguments::is_valhalla_enabled() || Universe::objectArrayKlass()->prototype_header() == markWord::prototype(), "should be the same");
305     oopDesc::set_mark(mem, markWord::prototype());
306     oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
307   }
308   arrayOopDesc::set_length(mem, element_count);
309   return objArrayOop(cast_to_oop(mem));
310 }
311 
312 void AOTMappedHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
313   // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
314   if (UseCompressedOops) {
315     *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
316   } else {
317     *segment->obj_at_addr<oop>(index) = root;
318   }
319 }
320 
321 void AOTMappedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
322   // Depending on the number of classes we are archiving, a single roots array may be
323   // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
324   // allows us to chop the large array into a series of "segments". Current layout
  // starts with zero or more segments exactly fitting MIN_GC_REGION_ALIGNMENT, and ends
326   // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
327   // This is simple and efficient. We do not need filler objects anywhere between the segments,
328   // or immediately after the last segment. This allows starting the object dump immediately
329   // after the roots.
330 
331   assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
332          "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);
333 
334   int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
335   assert(refArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
336          "Should match exactly");
337 
338   HeapRootSegments segments(_buffer_used,
339                             roots->length(),
340                             MIN_GC_REGION_ALIGNMENT,
341                             max_elem_count);
342 
343   int root_index = 0;
344   for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
345     int size_elems = segments.size_in_elems(seg_idx);
346     size_t size_bytes = segments.size_in_bytes(seg_idx);
347 
348     size_t oop_offset = _buffer_used;
349     _buffer_used = oop_offset + size_bytes;
350     ensure_buffer_space(_buffer_used);
351 
352     assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
353            "Roots segment %zu start is not aligned: %zu",
           seg_idx, oop_offset);
355 
356     objArrayOop seg_oop = allocate_root_segment(oop_offset, size_elems);
357     for (int i = 0; i < size_elems; i++) {
358       root_segment_at_put(seg_oop, i, roots->at(root_index++));
359     }
360 
361     log_info(aot, heap)("archived obj root segment [%d] = %zu bytes, obj = " PTR_FORMAT,
362                         size_elems, size_bytes, p2i(seg_oop));
363   }
364 
365   assert(root_index == roots->length(), "Post-condition: All roots are handled");
366 
367   _heap_root_segments = segments;
368 }
369 
370 // The goal is to sort the objects in increasing order of:
371 // - objects that have only oop pointers
372 // - objects that have both native and oop pointers
373 // - objects that have only native pointers
374 // - objects that have no pointers
375 static int oop_sorting_rank(oop o) {
376   bool has_oop_ptr, has_native_ptr;
377   HeapShared::get_pointer_info(o, has_oop_ptr, has_native_ptr);
378 
379   if (has_oop_ptr) {
380     if (!has_native_ptr) {
381       return 0;
382     } else {
383       return 1;
384     }
385   } else {
386     if (has_native_ptr) {
387       return 2;
388     } else {
389       return 3;
390     }
391   }
392 }
393 
394 int AOTMappedHeapWriter::compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b) {
395   int rank_a = a->_rank;
396   int rank_b = b->_rank;
397 
398   if (rank_a != rank_b) {
399     return rank_a - rank_b;
400   } else {
401     // If they are the same rank, sort them by their position in the _source_objs array
402     return a->_index - b->_index;
403   }
404 }
405 
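// Compute a rank for each source object (see oop_sorting_rank() above) and sort
// the objects by rank, preserving the original insertion order within each rank.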
406 void AOTMappedHeapWriter::sort_source_objs() {
407   log_info(aot)("sorting heap objects");
408   int len = _source_objs->length();
409   _source_objs_order = new GrowableArrayCHeap<HeapObjOrder, mtClassShared>(len);
410 
411   for (int i = 0; i < len; i++) {
412     oop o = _source_objs->at(i);
413     int rank = oop_sorting_rank(o);
414     HeapObjOrder os = {i, rank};
415     _source_objs_order->append(os);
416   }
417   log_info(aot)("computed ranks");
418   _source_objs_order->sort(compare_objs_by_oop_fields);
419   log_info(aot)("sorting heap objects done");
420 }
421 
422 void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
423   // There could be multiple root segments, which we want to be aligned by region.
424   // Putting them ahead of objects makes sure we waste no space.
425   copy_roots_to_buffer(roots);
426 
427   sort_source_objs();
428   for (int i = 0; i < _source_objs_order->length(); i++) {
429     int src_obj_index = _source_objs_order->at(i)._index;
430     oop src_obj = _source_objs->at(src_obj_index);
431     HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
432     assert(info != nullptr, "must be");
433     size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
434     info->set_buffer_offset(buffer_offset);
435 
436     OopHandle handle(Universe::vm_global(), src_obj);
437     _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
438     _buffer_offset_to_source_obj_table->maybe_grow();
439 
440     if (java_lang_Module::is_instance(src_obj)) {
441       Modules::check_archived_module_oop(src_obj);
442     }
443   }
444 
445   log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
446                 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
447 }
448 
449 size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
450   size_t byte_size = refArrayOopDesc::object_size(length) * HeapWordSize;
451   return byte_size;
452 }
453 
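// Find an array length whose objArrayOop occupies exactly fill_bytes. The search
// starts at the largest candidate length and counts down; ShouldNotReachHere()
// fires if no exact match exists.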
454 int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) {
455   assert(is_object_aligned(fill_bytes), "must be");
456   size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
457 
458   int initial_length = to_array_length(fill_bytes / elemSize);
459   for (int length = initial_length; length >= 0; length --) {
460     size_t array_byte_size = filler_array_byte_size(length);
461     if (array_byte_size == fill_bytes) {
462       return length;
463     }
464   }
465 
466   ShouldNotReachHere();
467   return -1;
468 }
469 
470 HeapWord* AOTMappedHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
471   assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
472   Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
473   HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
474   memset(mem, 0, fill_bytes);
475   narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
476   if (UseCompactObjectHeaders) {
477     oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
478   } else {
479     assert(!Arguments::is_valhalla_enabled() || Universe::objectArrayKlass()->prototype_header() == markWord::prototype(), "should be the same");
480     oopDesc::set_mark(mem, markWord::prototype());
481     cast_to_oop(mem)->set_narrow_klass(nk);
482   }
483   arrayOopDesc::set_length(mem, array_length);
484   return mem;
485 }
486 
487 void AOTMappedHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
488   // We fill only with arrays (so we don't need to use a single HeapWord filler if the
489   // leftover space is smaller than a zero-sized array object). Therefore, we need to
490   // make sure there's enough space of min_filler_byte_size in the current region after
491   // required_byte_size has been allocated. If not, fill the remainder of the current
492   // region.
493   size_t min_filler_byte_size = filler_array_byte_size(0);
494   size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;
495 
496   const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
497   const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
498 
499   if (cur_min_region_bottom != next_min_region_bottom) {
500     // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
501     // we can map the region in any region-based collector.
502     assert(next_min_region_bottom > cur_min_region_bottom, "must be");
503     assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
504            "no buffered object can be larger than %d bytes",  MIN_GC_REGION_ALIGNMENT);
505 
506     const size_t filler_end = next_min_region_bottom;
507     const size_t fill_bytes = filler_end - _buffer_used;
508     assert(fill_bytes > 0, "must be");
509     ensure_buffer_space(filler_end);
510 
511     int array_length = filler_array_length(fill_bytes);
512     log_info(aot, heap)("Inserting filler obj array of %d elements (%zu bytes total) @ buffer offset %zu",
513                         array_length, fill_bytes, _buffer_used);
514     HeapWord* filler = init_filler_array_at_buffer_top(array_length, fill_bytes);
515     _buffer_used = filler_end;
516     _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
517   }
518 }
519 
520 size_t AOTMappedHeapWriter::get_filler_size_at(address buffered_addr) {
521   size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
522   if (p != nullptr) {
523     assert(*p > 0, "filler must be larger than zero bytes");
524     return *p;
525   } else {
526     return 0; // buffered_addr is not a filler
527   }
528 }
529 
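// Store a value directly into a field of the buffered copy of an object. This is
// a raw store with no GC barriers, as the buffered copy lives outside the real
// Java heap.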
530 template <typename T>
531 void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
532   T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
533   *field_addr = value;
534 }
535 
536 size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
537   assert(!is_too_large_to_archive(src_obj), "already checked");
538   size_t byte_size = src_obj->size() * HeapWordSize;
539   assert(byte_size > 0, "no zero-size objects");
540 
541   // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that we don't have an object that can possibly
543   // span across two regions.
544   maybe_fill_gc_region_gap(byte_size);
545 
546   size_t new_used = _buffer_used + byte_size;
547   assert(new_used > _buffer_used, "no wrap around");
548 
549   size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
550   size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
551   assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
552 
553   ensure_buffer_space(new_used);
554 
555   address from = cast_from_oop<address>(src_obj);
556   address to = offset_to_buffered_address<address>(_buffer_used);
557   assert(is_object_aligned(_buffer_used), "sanity");
558   assert(is_object_aligned(byte_size), "sanity");
559   memcpy(to, from, byte_size);
560 
561   // These native pointers will be restored explicitly at run time.
562   if (java_lang_Module::is_instance(src_obj)) {
563     update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
564   } else if (java_lang_ClassLoader::is_instance(src_obj)) {
565 #ifdef ASSERT
566     // We only archive these loaders
567     if (src_obj != SystemDictionary::java_platform_loader() &&
568         src_obj != SystemDictionary::java_system_loader()) {
569       assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
570     }
571 #endif
572     update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
573   }
574 
575   size_t buffered_obj_offset = _buffer_used;
576   _buffer_used = new_used;
577 
578   return buffered_obj_offset;
579 }
580 
581 // Set the range [_requested_bottom, _requested_top), the requested address range of all
582 // the archived heap objects in the production run.
583 //
584 // (1) UseCompressedOops == true && !is_writing_deterministic_heap()
585 //
586 //     The archived objects are stored using the COOPS encoding of the assembly phase.
587 //     We pick a range within the heap used by the assembly phase.
588 //
589 //     In the production run, if different COOPS encodings are used:
//         - The heap contents need to be relocated.
591 //
592 // (2) UseCompressedOops == true && is_writing_deterministic_heap()
593 //
//     We always use zero-based, zero-shift encoding. The requested range is placed just
//     below 0x100000000 (4 GB), with _requested_bottom aligned down to at least 1 MB.
595 //
596 // (3) UseCompressedOops == false:
597 //
598 //     In the production run, the heap range is usually picked (randomly) by the OS, so we
599 //     will almost always need to perform relocation, regardless of how we pick the requested
600 //     address range.
601 //
602 //     So we just hard code it to NOCOOPS_REQUESTED_BASE.
603 //
604 void AOTMappedHeapWriter::set_requested_address_range(AOTMappedHeapInfo* info) {
605   assert(!info->is_used(), "only set once");
606 
607   size_t heap_region_byte_size = _buffer_used;
  assert(heap_region_byte_size > 0, "must archive at least one object!");
609 
610   if (UseCompressedOops) {
611     if (is_writing_deterministic_heap()) {
612       // Pick a heap range so that requested addresses can be encoded with zero-base/no shift.
613       // We align the requested bottom to at least 1 MB: if the production run uses G1 with a small
614       // heap (e.g., -Xmx256m), it's likely that we can map the archived objects at the
615       // requested location to avoid relocation.
616       //
617       // For other collectors or larger heaps, relocation is unavoidable, but is usually
618       // quite cheap. If you really want to avoid relocation, use the AOT workflow instead.
619       address heap_end = (address)0x100000000;
620       size_t alignment = MAX2(MIN_GC_REGION_ALIGNMENT, 1024 * 1024);
621       if (align_up(heap_region_byte_size, alignment) >= (size_t)heap_end) {
622         log_error(aot, heap)("cached heap space is too large: %zu bytes", heap_region_byte_size);
623         AOTMetaspace::unrecoverable_writing_error();
624       }
625       _requested_bottom = align_down(heap_end - heap_region_byte_size, alignment);
626     } else if (UseG1GC) {
627       // For G1, pick the range at the top of the current heap. If the exact same heap sizes
628       // are used in the production run, it's likely that we can map the archived objects
629       // at the requested location to avoid relocation.
630       address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
631       log_info(aot, heap)("Heap end = %p", heap_end);
632       _requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
633       _requested_bottom = align_down(_requested_bottom, MIN_GC_REGION_ALIGNMENT);
634       assert(is_aligned(_requested_bottom, G1HeapRegion::GrainBytes), "sanity");
635     } else {
636       _requested_bottom = align_up(CompressedOops::begin(), MIN_GC_REGION_ALIGNMENT);
637     }
638   } else {
639     // We always write the objects as if the heap started at this address. This
640     // makes the contents of the archive heap deterministic.
641     //
642     // Note that at runtime, the heap address is selected by the OS, so the archive
643     // heap will not be mapped at 0x10000000, and the contents need to be patched.
644     _requested_bottom = align_up((address)NOCOOPS_REQUESTED_BASE, MIN_GC_REGION_ALIGNMENT);
645   }
646 
647   assert(is_aligned(_requested_bottom, MIN_GC_REGION_ALIGNMENT), "sanity");
648 
649   _requested_top = _requested_bottom + _buffer_used;
650 
651   info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
652                                     offset_to_buffered_address<HeapWord*>(_buffer_used)));
653   info->set_root_segments(_heap_root_segments);
654 }
655 
656 // Oop relocation
657 
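// Translate an address in the requested range back to the corresponding address
// inside the output buffer.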
658 template <typename T> T* AOTMappedHeapWriter::requested_addr_to_buffered_addr(T* p) {
659   assert(is_in_requested_range(cast_to_oop(p)), "must be");
660 
661   address addr = address(p);
662   assert(addr >= _requested_bottom, "must be");
663   size_t offset = addr - _requested_bottom;
664   return offset_to_buffered_address<T*>(offset);
665 }
666 
667 template <typename T> oop AOTMappedHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
668   oop o = load_oop_from_buffer(buffered_addr);
669   assert(!in_buffer(cast_from_oop<address>(o)), "must point to source oop");
670   return o;
671 }
672 
673 template <typename T> void AOTMappedHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
674                                                                                    oop request_oop) {
675   assert(request_oop == nullptr || is_in_requested_range(request_oop), "must be");
676   store_oop_in_buffer(buffered_addr, request_oop);
677 }
678 
679 inline void AOTMappedHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
680   *buffered_addr = requested_obj;
681 }
682 
683 inline void AOTMappedHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
684   narrowOop val = CompressedOops::encode(requested_obj);
685   *buffered_addr = val;
686 }
687 
688 oop AOTMappedHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
689   return *buffered_addr;
690 }
691 
692 oop AOTMappedHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
693   return CompressedOops::decode(*buffered_addr);
694 }
695 
696 template <typename T> void AOTMappedHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, oop source_referent, CHeapBitMap* oopmap) {
697   oop request_referent = source_obj_to_requested_obj(source_referent);
698   if (UseCompressedOops && is_writing_deterministic_heap()) {
699     // We use zero-based, 0-shift encoding, so the narrowOop is just the lower
700     // 32 bits of request_referent
701     intptr_t addr = cast_from_oop<intptr_t>(request_referent);
702     *((narrowOop*)field_addr_in_buffer) = CompressedOops::narrow_oop_cast(addr);
703   } else {
704     store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
705   }
706   if (request_referent != nullptr) {
707     mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
708   }
709 }
710 
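// Record this oop field in the oopmap, indexed by the field's position within the
// requested address range (in narrowOop or oop units).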
711 template <typename T> void AOTMappedHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
712   T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
713   address requested_region_bottom;
714 
715   assert(request_p >= (T*)_requested_bottom, "sanity");
716   assert(request_p <  (T*)_requested_top, "sanity");
717   requested_region_bottom = _requested_bottom;
718 
719   // Mark the pointer in the oopmap
720   T* region_bottom = (T*)requested_region_bottom;
721   assert(request_p >= region_bottom, "must be");
722   BitMap::idx_t idx = request_p - region_bottom;
723   assert(idx < oopmap->size(), "overflow");
724   oopmap->set_bit(idx);
725 }
726 
727 void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj,  Klass* src_klass) {
728   assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
729   narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
730   address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
731 
732   oop fake_oop = cast_to_oop(buffered_addr);
733   if (UseCompactObjectHeaders) {
734     markWord prototype_header = src_klass->prototype_header().set_narrow_klass(nk);
735     fake_oop->set_mark(prototype_header);
736   } else {
737     fake_oop->set_narrow_klass(nk);
738   }
739 
740   if (src_obj == nullptr) {
741     return;
742   }
743   // We need to retain the identity_hash, because it may have been used by some hashtables
744   // in the shared heap.
745   if (!src_obj->fast_no_hash_check() && (!(Arguments::is_valhalla_enabled() && src_obj->mark().is_inline_type()))) {
746     intptr_t src_hash = src_obj->identity_hash();
747     if (UseCompactObjectHeaders) {
748       fake_oop->set_mark(fake_oop->mark().copy_set_hash(src_hash));
749     } else if (Arguments::is_valhalla_enabled()) {
750       fake_oop->set_mark(src_klass->prototype_header().copy_set_hash(src_hash));
751     } else {
752       fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
753     }
754     assert(fake_oop->mark().is_unlocked(), "sanity");
755 
756     DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
757     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
758   }
759   // Strip age bits.
760   fake_oop->set_mark(fake_oop->mark().set_age(0));
761 }
762 
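// Iterates the oop fields of a source object and rewrites the corresponding
// fields in its buffered copy so that they point to the requested addresses of
// their referents. Each rewritten non-null field is also recorded in the oopmap
// so it can be patched at run time if the heap ends up mapped elsewhere.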
763 class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
764   oop _src_obj;
765   address _buffered_obj;
766   CHeapBitMap* _oopmap;
767   bool _is_java_lang_ref;
768 public:
769   EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
770     _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
771   {
772     _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(src_obj);
773   }
774 
775   void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
776   void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }
777 
778 private:
779   template <class T> void do_oop_work(T *p) {
780     int field_offset = pointer_delta_as_int((char*)p, cast_from_oop<char*>(_src_obj));
781     T* field_addr = (T*)(_buffered_obj + field_offset);
782     oop referent = load_source_oop_from_buffer<T>(field_addr);
783     referent = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, referent);
784     AOTMappedHeapWriter::relocate_field_in_buffer<T>(field_addr, referent, _oopmap);
785   }
786 };
787 
788 static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
789   // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
790   size_t start = bitmap->find_first_set_bit(0);
791   size_t end = bitmap->size();
792   log_info(aot)("%s = %7zu ... %7zu (%3zu%% ... %3zu%% = %3zu%%)", which,
793                 start, end,
794                 start * 100 / total_bits,
795                 end * 100 / total_bits,
796                 (end - start) * 100 / total_bits);
797 }
798 
799 // Update all oop fields embedded in the buffered objects
800 void AOTMappedHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
801                                                       AOTMappedHeapInfo* heap_info) {
802   size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
803   size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);
805 
806   for (int i = 0; i < _source_objs_order->length(); i++) {
807     int src_obj_index = _source_objs_order->at(i)._index;
808     oop src_obj = _source_objs->at(src_obj_index);
809     HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
810     assert(info != nullptr, "must be");
811     oop requested_obj = requested_obj_from_buffer_offset(info->buffer_offset());
812     update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
813     address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
814     EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
815     src_obj->oop_iterate(&relocator);
816     mark_native_pointers(src_obj);
  }
818 
819   // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
820   // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
821   for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
822     size_t seg_offset = _heap_root_segments.segment_offset(seg_idx);
823 
824     objArrayOop requested_obj = (objArrayOop)requested_obj_from_buffer_offset(seg_offset);
825     update_header_for_requested_obj(requested_obj, nullptr, Universe::objectArrayKlass());
826     address buffered_obj = offset_to_buffered_address<address>(seg_offset);
827     int length = _heap_root_segments.size_in_elems(seg_idx);
828 
829     size_t elem_size = UseCompressedOops ? sizeof(narrowOop) : sizeof(oop);
830 
831     for (int i = 0; i < length; i++) {
832       // There is no source object; these are native oops - load, translate and
833       // write back
834       size_t elem_offset = objArrayOopDesc::base_offset_in_bytes() + elem_size * i;
835       HeapWord* elem_addr = (HeapWord*)(buffered_obj + elem_offset);
836       oop obj = NativeAccess<>::oop_load(elem_addr);
837       obj = HeapShared::maybe_remap_referent(false /* is_reference_field */, elem_offset, obj);
838       if (UseCompressedOops) {
839         relocate_field_in_buffer<narrowOop>((narrowOop*)elem_addr, obj, heap_info->oopmap());
840       } else {
841         relocate_field_in_buffer<oop>((oop*)elem_addr, obj, heap_info->oopmap());
842       }
843     }
844   }
845 
846   compute_ptrmap(heap_info);
847 
848   size_t total_bytes = (size_t)_buffer->length();
849   log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
850   log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
851 }
852 
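// Record a non-null Metadata* field of src_obj; compute_ptrmap() later rewrites
// the buffered copy of the field to its requested address and marks it in the
// ptrmap.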
853 void AOTMappedHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
854   Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
855   if (ptr != nullptr) {
856     NativePointerInfo info;
857     info._src_obj = src_obj;
858     info._field_offset = field_offset;
859     _native_pointers->append(info);
860     HeapShared::set_has_native_pointers(src_obj);
861     _num_native_ptrs ++;
862   }
863 }
864 
865 void AOTMappedHeapWriter::mark_native_pointers(oop orig_obj) {
866   HeapShared::do_metadata_offsets(orig_obj, [&](int offset) {
867     mark_native_pointer(orig_obj, offset);
868   });
869 }
870 
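// For every native pointer recorded by mark_native_pointer(): set the
// corresponding bit in the ptrmap and rewrite the buffered field to the requested
// address of its metadata (using the regenerated copy if one exists). The ptrmap
// is then trimmed to the highest marked bit.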
871 void AOTMappedHeapWriter::compute_ptrmap(AOTMappedHeapInfo* heap_info) {
872   int num_non_null_ptrs = 0;
873   Metadata** bottom = (Metadata**) _requested_bottom;
874   Metadata** top = (Metadata**) _requested_top; // exclusive
875   heap_info->ptrmap()->resize(top - bottom);
876 
877   BitMap::idx_t max_idx = 32; // paranoid - don't make it too small
878   for (int i = 0; i < _native_pointers->length(); i++) {
879     NativePointerInfo info = _native_pointers->at(i);
880     oop src_obj = info._src_obj;
881     int field_offset = info._field_offset;
882     HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
883     // requested_field_addr = the address of this field in the requested space
884     oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
885     Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
886     assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");
887 
888     // Mark this field in the bitmap
889     BitMap::idx_t idx = requested_field_addr - bottom;
890     heap_info->ptrmap()->set_bit(idx);
891     num_non_null_ptrs ++;
892     max_idx = MAX2(max_idx, idx);
893 
894     // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
895     // this address if the RO/RW regions are mapped at the default location).
896 
897     Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
898     Metadata* native_ptr = *buffered_field_addr;
899     guarantee(native_ptr != nullptr, "sanity");
900 
901     if (RegeneratedClasses::has_been_regenerated(native_ptr)) {
902       native_ptr = RegeneratedClasses::get_regenerated_object(native_ptr);
903     }
904 
905     guarantee(ArchiveBuilder::current()->has_been_archived((address)native_ptr),
906               "Metadata %p should have been archived", native_ptr);
907 
908     address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
909     address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
910     *buffered_field_addr = (Metadata*)requested_native_ptr;
911   }
912 
913   heap_info->ptrmap()->resize(max_idx + 1);
  log_info(aot, heap)("compute_ptrmap: marked %d non-null native pointers for heap region (%zu bits)",
915                       num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
916 }
917 
918 AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(AOTMappedHeapInfo* heap_info) {
919   class MappedWriterOopIterator : public AOTMappedHeapOopIterator {
920   public:
921     MappedWriterOopIterator(address buffer_start,
922                             address buffer_end,
923                             address requested_base,
924                             address requested_start,
925                             int requested_shift,
926                             size_t num_root_segments) :
927       AOTMappedHeapOopIterator(buffer_start,
928                                buffer_end,
929                                requested_base,
930                                requested_start,
931                                requested_shift,
932                                num_root_segments) {}
933 
934     AOTMapLogger::OopData capture(address buffered_addr) override {
935       oopDesc* raw_oop = (oopDesc*)buffered_addr;
936       size_t size = size_of_buffered_oop(buffered_addr);
937       address requested_addr = buffered_addr_to_requested_addr(buffered_addr);
938       intptr_t target_location = (intptr_t)requested_addr;
939       uint64_t pd = (uint64_t)(pointer_delta(buffered_addr, _buffer_start, 1));
940       uint32_t narrow_location = checked_cast<uint32_t>(_buffer_start_narrow_oop + (pd >> _requested_shift));
941       Klass* klass = real_klass_of_buffered_oop(buffered_addr);
942 
943       return { buffered_addr,
944                requested_addr,
945                target_location,
946                narrow_location,
947                raw_oop,
948                klass,
949                size,
950                false };
951     }
952   };
953 
954   MemRegion r = heap_info->buffer_region();
955   address buffer_start = address(r.start());
956   address buffer_end = address(r.end());
957 
958   address requested_base = UseCompressedOops ? AOTMappedHeapWriter::narrow_oop_base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
959   address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
960   int requested_shift = AOTMappedHeapWriter::narrow_oop_shift();
961 
962   return new MappedWriterOopIterator(buffer_start,
963                                      buffer_end,
964                                      requested_base,
965                                      requested_start,
966                                      requested_shift,
967                                      heap_info->root_segments().count());
968 }
969 
970 #endif // INCLUDE_CDS_JAVA_HEAP