/*
 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/modules.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t AOTMappedHeapWriter::_buffer_used;

// Heap root segments
HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;

address AOTMappedHeapWriter::_requested_bottom;
address AOTMappedHeapWriter::_requested_top;

GrowableArrayCHeap<AOTMappedHeapWriter::NativePointerInfo, mtClassShared>* AOTMappedHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* AOTMappedHeapWriter::_source_objs;
GrowableArrayCHeap<AOTMappedHeapWriter::HeapObjOrder, mtClassShared>* AOTMappedHeapWriter::_source_objs_order;

AOTMappedHeapWriter::BufferOffsetToSourceObjectTable*
AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;

DumpedInternedStrings* AOTMappedHeapWriter::_dumped_interned_strings = nullptr;

typedef HashTable<
      size_t,    // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
      size_t,    // size of this filler (in bytes)
      127,       // prime number
      AnyObj::C_HEAP,
      mtClassShared> FillersTable;
static FillersTable* _fillers;
static int _num_native_ptrs = 0;
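
// A hedged usage sketch (the offset and size below are made-up illustrative
// values, not taken from a real dump): when maybe_fill_gc_region_gap() pads
// out a region with a 24-byte filler array at buffer offset 0x3fffe8, it
// records
//
//   _fillers->put(0x3fffe8, 24);
//
// so that get_filler_size_at() can later distinguish fillers from real
// archived objects when walking the buffer.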

void AOTMappedHeapWriter::init() {
  if (CDSConfig::is_dumping_heap()) {
    Universe::heap()->collect(GCCause::_java_lang_system_gc);

    _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
    _dumped_interned_strings = new (mtClass) DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
    _fillers = new FillersTable();
    _requested_bottom = nullptr;
    _requested_top = nullptr;

    _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);

    guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
  }
}

void AOTMappedHeapWriter::delete_tables_with_raw_oops() {
  delete _source_objs;
  _source_objs = nullptr;

  delete _dumped_interned_strings;
  _dumped_interned_strings = nullptr;
}

void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

void AOTMappedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                ArchiveMappedHeapInfo* heap_info) {
  assert(CDSConfig::is_dumping_heap(), "sanity");
  allocate_buffer();
  copy_source_objs_to_buffer(roots);
  set_requested_address(heap_info);
  relocate_embedded_oops(roots, heap_info);
}

bool AOTMappedHeapWriter::is_too_large_to_archive(oop o) {
  return is_too_large_to_archive(o->size());
}

bool AOTMappedHeapWriter::is_string_too_large_to_archive(oop string) {
  typeArrayOop value = java_lang_String::value_no_keepalive(string);
  return is_too_large_to_archive(value);
}

bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) {
  assert(size > 0, "no zero-size object");
  assert(size * HeapWordSize > size, "no overflow");
  static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");

  size_t byte_size = size * HeapWordSize;
  if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
    return true;
  } else {
    return false;
  }
}
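
// For example (assuming an illustrative MIN_GC_REGION_ALIGNMENT of 1*M): a
// java.lang.String whose backing value array occupies more than 1 MB cannot
// be archived, because no buffered object may span a minimal GC region
// boundary (see maybe_fill_gc_region_gap() below).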

// Keep track of the contents of the archived interned string table. This table
// is used only by CDSHeapVerifier.
void AOTMappedHeapWriter::add_to_dumped_interned_strings(oop string) {
  assert_at_safepoint(); // DumpedInternedStrings uses raw oops
  assert(!is_string_too_large_to_archive(string), "must be");
  bool created;
  _dumped_interned_strings->put_if_absent(string, true, &created);
  if (created) {
    // Prevent string deduplication from changing the value field to
    // something not in the archive.
    java_lang_String::set_deduplication_forbidden(string);
    _dumped_interned_strings->maybe_grow();
  }
}

bool AOTMappedHeapWriter::is_dumped_interned_string(oop o) {
  return _dumped_interned_strings->get(o) != nullptr;
}

// Various lookup functions between source_obj, buffered_obj and requested_obj
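//
// Each archived object exists in up to three address spaces (a sketch; the
// concrete address below is illustrative only):
//
//   source_obj    - the live oop in the dump-time Java heap (e.g., 0x712000000)
//   buffered_obj  - its copy inside _buffer, at buffer_bottom() + offset
//   requested_obj - the address we ask for the object to be mapped at during
//                   runtime, i.e., _requested_bottom + the same offset
//
// The functions below translate between these spaces, e.g.:
//
//   oop req = source_obj_to_requested_obj(src);                // source   -> requested
//   oop src = buffered_addr_to_source_obj(buf_addr);           // buffered -> source
//   address ra = buffered_addr_to_requested_addr(buf_addr);    // buffered -> requested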
bool AOTMappedHeapWriter::is_in_requested_range(oop o) {
  assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
  address a = cast_from_oop<address>(o);
  return (_requested_bottom <= a && a < _requested_top);
}

oop AOTMappedHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
  oop req_obj = cast_to_oop(_requested_bottom + offset);
  assert(is_in_requested_range(req_obj), "must be");
  return req_obj;
}

oop AOTMappedHeapWriter::source_obj_to_requested_obj(oop src_obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
  if (p != nullptr) {
    return requested_obj_from_buffer_offset(p->buffer_offset());
  } else {
    return nullptr;
  }
}

oop AOTMappedHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
  OopHandle* oh = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
  if (oh != nullptr) {
    return oh->resolve();
  } else {
    return nullptr;
  }
}

Klass* AOTMappedHeapWriter::real_klass_of_buffered_oop(address buffered_addr) {
  oop p = buffered_addr_to_source_obj(buffered_addr);
  if (p != nullptr) {
    return p->klass();
  } else if (get_filler_size_at(buffered_addr) > 0) {
    return Universe::fillerArrayKlass();
  } else {
    // This is one of the root segments
    return Universe::objectArrayKlass();
  }
}

size_t AOTMappedHeapWriter::size_of_buffered_oop(address buffered_addr) {
  oop p = buffered_addr_to_source_obj(buffered_addr);
  if (p != nullptr) {
    return p->size();
  }

  size_t nbytes = get_filler_size_at(buffered_addr);
  if (nbytes > 0) {
    assert((nbytes % BytesPerWord) == 0, "should be aligned");
    return nbytes / BytesPerWord;
  }

  address hrs = buffer_bottom();
  for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
    nbytes = _heap_root_segments.size_in_bytes(seg_idx);
    if (hrs == buffered_addr) {
      assert((nbytes % BytesPerWord) == 0, "should be aligned");
      return nbytes / BytesPerWord;
    }
    hrs += nbytes;
  }

  ShouldNotReachHere();
  return 0;
}

address AOTMappedHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
  return _requested_bottom + buffered_address_to_offset(buffered_addr);
}

address AOTMappedHeapWriter::requested_address() {
  assert(_buffer != nullptr, "must be initialized");
  return _requested_bottom;
}

void AOTMappedHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

void AOTMappedHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

objArrayOop AOTMappedHeapWriter::allocate_root_segment(size_t offset, int element_count) {
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(offset);
  memset(mem, 0, objArrayOopDesc::object_size(element_count) * HeapWordSize); // object_size() is in words

  // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
  }
  arrayOopDesc::set_length(mem, element_count);
  return objArrayOop(cast_to_oop(mem));
}

void AOTMappedHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
  // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
  if (UseCompressedOops) {
    *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
  } else {
    *segment->obj_at_addr<oop>(index) = root;
  }
}

void AOTMappedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // Depending on the number of classes we are archiving, a single roots array may be
  // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
  // allows us to chop the large array into a series of "segments". Current layout
  // starts with zero or more segments exactly fitting MIN_GC_REGION_ALIGNMENT, and ends
  // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
  // This is simple and efficient. We do not need filler objects anywhere between the segments,
  // or immediately after the last segment. This allows starting the object dump immediately
  // after the roots.
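  //
  // A worked example (the numbers are illustrative assumptions, not constants
  // from this file): with MIN_GC_REGION_ALIGNMENT == 1*M, UseCompressedOops
  // (heapOopSize == 4) and a 16-byte array header, each full segment holds
  //   max_elem_count = (1*M - 16) / 4 = 262140 roots.
  // Archiving 300000 roots would then produce two segments:
  //   segment 0: 262140 elems, exactly 1*M bytes (region-aligned)
  //   segment 1:  37860 elems, 16 + 37860*4 = 151456 bytes (under-full tail)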

  assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
         "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);

  int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
  assert(objArrayOopDesc::object_size(max_elem_count) * HeapWordSize == MIN_GC_REGION_ALIGNMENT,
         "Should match exactly");

  HeapRootSegments segments(_buffer_used,
                            roots->length(),
                            MIN_GC_REGION_ALIGNMENT,
                            max_elem_count);

  int root_index = 0;
  for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
    int size_elems = segments.size_in_elems(seg_idx);
    size_t size_bytes = segments.size_in_bytes(seg_idx);

    size_t oop_offset = _buffer_used;
    _buffer_used = oop_offset + size_bytes;
    ensure_buffer_space(_buffer_used);

    assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
           "Roots segment %zu start is not aligned: %zu",
           seg_idx, oop_offset);

    objArrayOop seg_oop = allocate_root_segment(oop_offset, size_elems);
    for (int i = 0; i < size_elems; i++) {
      root_segment_at_put(seg_oop, i, roots->at(root_index++));
    }

    log_info(aot, heap)("archived obj root segment [%d] = %zu bytes, obj = " PTR_FORMAT,
                        size_elems, size_bytes, p2i(seg_oop));
  }

  assert(root_index == roots->length(), "Post-condition: All roots are handled");

  _heap_root_segments = segments;
}

// The goal is to sort the objects in increasing order of:
// - objects that have only oop pointers
// - objects that have both native and oop pointers
// - objects that have only native pointers
// - objects that have no pointers
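//
// For example (a hedged illustration, not an exhaustive classification): an
// Object[] typically has only oop pointers (rank 0), a java.lang.Class
// instance has both oop fields and a native Klass* (rank 1), and a primitive
// array such as byte[] has no pointers at all (rank 3). Grouping objects by
// rank clusters the set bits in the oopmap/ptrmap bitmaps, which is what
// log_bitmap_usage() below measures.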
static int oop_sorting_rank(oop o) {
  bool has_oop_ptr, has_native_ptr;
  HeapShared::get_pointer_info(o, has_oop_ptr, has_native_ptr);

  if (has_oop_ptr) {
    if (!has_native_ptr) {
      return 0;
    } else {
      return 1;
    }
  } else {
    if (has_native_ptr) {
      return 2;
    } else {
      return 3;
    }
  }
}

int AOTMappedHeapWriter::compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b) {
  int rank_a = a->_rank;
  int rank_b = b->_rank;

  if (rank_a != rank_b) {
    return rank_a - rank_b;
  } else {
    // If they are the same rank, sort them by their position in the _source_objs array
    return a->_index - b->_index;
  }
}

void AOTMappedHeapWriter::sort_source_objs() {
  log_info(aot)("sorting heap objects");
  int len = _source_objs->length();
  _source_objs_order = new GrowableArrayCHeap<HeapObjOrder, mtClassShared>(len);

  for (int i = 0; i < len; i++) {
    oop o = _source_objs->at(i);
    int rank = oop_sorting_rank(o);
    HeapObjOrder os = {i, rank};
    _source_objs_order->append(os);
  }
  log_info(aot)("computed ranks");
  _source_objs_order->sort(compare_objs_by_oop_fields);
  log_info(aot)("sorting heap objects done");
}

void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // There could be multiple root segments, which we want to be aligned by region.
  // Putting them ahead of objects makes sure we waste no space.
  copy_roots_to_buffer(roots);

  sort_source_objs();
  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);

    OopHandle handle(Universe::vm_global(), src_obj);
    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
    _buffer_offset_to_source_obj_table->maybe_grow();

    if (java_lang_Module::is_instance(src_obj)) {
      Modules::check_archived_module_oop(src_obj);
    }
  }

  log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
                _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
}

size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}

int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

HeapWord* AOTMappedHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
  return mem;
}

void AOTMappedHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space of min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
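  //
  // A worked example (illustrative numbers): with MIN_GC_REGION_ALIGNMENT == 1*M,
  // _buffer_used == 0xFFF00 (256 bytes left in the current region) and
  // required_byte_size == 512, the new object would cross the 0x100000
  // boundary. We therefore insert a filler array covering the remaining
  // 256 bytes and bump _buffer_used to exactly 0x100000 before the caller
  // copies the object.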
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
    // we can map the region in any region-based collector.
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
           "no buffered object can be larger than %d bytes", MIN_GC_REGION_ALIGNMENT);

    const size_t filler_end = next_min_region_bottom;
    const size_t fill_bytes = filler_end - _buffer_used;
    assert(fill_bytes > 0, "must be");
    ensure_buffer_space(filler_end);

    int array_length = filler_array_length(fill_bytes);
    log_info(aot, heap)("Inserting filler obj array of %d elements (%zu bytes total) @ buffer offset %zu",
                        array_length, fill_bytes, _buffer_used);
    HeapWord* filler = init_filler_array_at_buffer_top(array_length, fill_bytes);
    _buffer_used = filler_end;
    _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
  }
}

size_t AOTMappedHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  assert(!is_too_large_to_archive(src_obj), "already checked");
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that we don't have an object that can possibly
  // span across two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, byte_size);

  // These native pointers will be restored explicitly at run time.
  if (java_lang_Module::is_instance(src_obj)) {
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

void AOTMappedHeapWriter::set_requested_address(ArchiveMappedHeapInfo* info) {
  assert(!info->is_used(), "only set once");

  size_t heap_region_byte_size = _buffer_used;
  assert(heap_region_byte_size > 0, "must have archived at least one object!");

  if (UseCompressedOops) {
    if (UseG1GC) {
      address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
      log_info(aot, heap)("Heap end = %p", heap_end);
      _requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
      _requested_bottom = align_down(_requested_bottom, MIN_GC_REGION_ALIGNMENT);
      assert(is_aligned(_requested_bottom, G1HeapRegion::GrainBytes), "sanity");
    } else {
      _requested_bottom = align_up(CompressedOops::begin(), MIN_GC_REGION_ALIGNMENT);
    }
  } else {
    // We always write the objects as if the heap started at this address. This
    // makes the contents of the archive heap deterministic.
    //
    // Note that at runtime, the heap address is selected by the OS, so the archive
    // heap will not be mapped at 0x10000000, and the contents need to be patched.
    _requested_bottom = align_up((address)NOCOOPS_REQUESTED_BASE, MIN_GC_REGION_ALIGNMENT);
  }

  assert(is_aligned(_requested_bottom, MIN_GC_REGION_ALIGNMENT), "sanity");

  _requested_top = _requested_bottom + _buffer_used;

  info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
                                    offset_to_buffered_address<HeapWord*>(_buffer_used)));
  info->set_root_segments(_heap_root_segments);
}

// Oop relocation

template <typename T> T* AOTMappedHeapWriter::requested_addr_to_buffered_addr(T* p) {
  assert(is_in_requested_range(cast_to_oop(p)), "must be");

  address addr = address(p);
  assert(addr >= _requested_bottom, "must be");
  size_t offset = addr - _requested_bottom;
  return offset_to_buffered_address<T*>(offset);
}

template <typename T> oop AOTMappedHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
  oop o = load_oop_from_buffer(buffered_addr);
  assert(!in_buffer(cast_from_oop<address>(o)), "must point to source oop");
  return o;
}

template <typename T> void AOTMappedHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
                                                                              oop request_oop) {
  assert(request_oop == nullptr || is_in_requested_range(request_oop), "must be");
  store_oop_in_buffer(buffered_addr, request_oop);
}

inline void AOTMappedHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
  *buffered_addr = requested_obj;
}

inline void AOTMappedHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
  narrowOop val = CompressedOops::encode(requested_obj);
  *buffered_addr = val;
}

oop AOTMappedHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
  return *buffered_addr;
}

oop AOTMappedHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
  return CompressedOops::decode(*buffered_addr);
}

template <typename T> void AOTMappedHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, oop source_referent, CHeapBitMap* oopmap) {
  oop request_referent = source_obj_to_requested_obj(source_referent);
  store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
  if (request_referent != nullptr) {
    mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
  }
}

template <typename T> void AOTMappedHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
  address requested_region_bottom;

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p <  (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
  } else {
    fake_oop->set_narrow_klass(nk);
  }

  if (src_obj == nullptr) {
    return;
  }
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
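  //
  // For example (a sketch of the intent, not of the exact markWord bit
  // layout): if src_obj was used as a key in an archived, identity-hashed
  // table, the lookups were computed from src_obj->identity_hash(). The
  // archived copy must report the same value at runtime, so we bake that
  // hash into the mark word here rather than let it be recomputed from the
  // (different) runtime address.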
  if (!src_obj->fast_no_hash_check()) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
  // Strip age bits.
  fake_oop->set_mark(fake_oop->mark().set_age(0));
}

class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;
  bool _is_java_lang_ref;
public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
    _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
  {
    _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(src_obj);
  }

  void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }

private:
  template <class T> void do_oop_work(T *p) {
    int field_offset = pointer_delta_as_int((char*)p, cast_from_oop<char*>(_src_obj));
    T* field_addr = (T*)(_buffered_obj + field_offset);
    oop referent = load_source_oop_from_buffer<T>(field_addr);
    referent = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, referent);
    AOTMappedHeapWriter::relocate_field_in_buffer<T>(field_addr, referent, _oopmap);
  }
};

static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
  // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
  size_t start = bitmap->find_first_set_bit(0);
  size_t end = bitmap->size();
  log_info(aot)("%s = %7zu ... %7zu (%3zu%% ... %3zu%% = %3zu%%)", which,
                start, end,
                start * 100 / total_bits,
                end * 100 / total_bits,
                (end - start) * 100 / total_bits);
}

// Update all oop fields embedded in the buffered objects
void AOTMappedHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                                 ArchiveMappedHeapInfo* heap_info) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);

  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    oop requested_obj = requested_obj_from_buffer_offset(info->buffer_offset());
    update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
    address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
    EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
    src_obj->oop_iterate(&relocator);
    mark_native_pointers(src_obj);
  }

  // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
  // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
  for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
    size_t seg_offset = _heap_root_segments.segment_offset(seg_idx);

    objArrayOop requested_obj = (objArrayOop)requested_obj_from_buffer_offset(seg_offset);
    update_header_for_requested_obj(requested_obj, nullptr, Universe::objectArrayKlass());
    address buffered_obj = offset_to_buffered_address<address>(seg_offset);
    int length = _heap_root_segments.size_in_elems(seg_idx);

    size_t elem_size = UseCompressedOops ? sizeof(narrowOop) : sizeof(oop);

    for (int i = 0; i < length; i++) {
      // There is no source object; these are native oops - load, translate and
      // write back
      size_t elem_offset = objArrayOopDesc::base_offset_in_bytes() + elem_size * i;
      HeapWord* elem_addr = (HeapWord*)(buffered_obj + elem_offset);
      oop obj = NativeAccess<>::oop_load(elem_addr);
      obj = HeapShared::maybe_remap_referent(false /* is_reference_field */, elem_offset, obj);
      if (UseCompressedOops) {
        relocate_field_in_buffer<narrowOop>((narrowOop*)elem_addr, obj, heap_info->oopmap());
      } else {
        relocate_field_in_buffer<oop>((oop*)elem_addr, obj, heap_info->oopmap());
      }
    }
  }

  compute_ptrmap(heap_info);

  size_t total_bytes = (size_t)_buffer->length();
  log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
  log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
}

void AOTMappedHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
  Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
  if (ptr != nullptr) {
    NativePointerInfo info;
    info._src_obj = src_obj;
    info._field_offset = field_offset;
    _native_pointers->append(info);
    HeapShared::set_has_native_pointers(src_obj);
    _num_native_ptrs++;
  }
}

void AOTMappedHeapWriter::mark_native_pointers(oop orig_obj) {
  HeapShared::do_metadata_offsets(orig_obj, [&](int offset) {
    mark_native_pointer(orig_obj, offset);
  });
}

void AOTMappedHeapWriter::compute_ptrmap(ArchiveMappedHeapInfo* heap_info) {
  int num_non_null_ptrs = 0;
  Metadata** bottom = (Metadata**) _requested_bottom;
  Metadata** top = (Metadata**) _requested_top; // exclusive
  heap_info->ptrmap()->resize(top - bottom);

  BitMap::idx_t max_idx = 32; // paranoid - don't make it too small
  for (int i = 0; i < _native_pointers->length(); i++) {
    NativePointerInfo info = _native_pointers->at(i);
    oop src_obj = info._src_obj;
    int field_offset = info._field_offset;
    HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
    // requested_field_addr = the address of this field in the requested space
    oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
    Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
    assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");

    // Mark this field in the bitmap
    BitMap::idx_t idx = requested_field_addr - bottom;
    heap_info->ptrmap()->set_bit(idx);
    num_non_null_ptrs++;
    max_idx = MAX2(max_idx, idx);

    // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
    // this address if the RO/RW regions are mapped at the default location).
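    //
    // A hedged example (the addresses are made up): if *buffered_field_addr
    // currently points to a dump-time InstanceKlass at 0x00007f3012345678, we
    // look up that Klass's copy in the archive buffer and rewrite the field
    // with the address the copy will have when the RO/RW regions are mapped
    // at the default requested base.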

    Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
    Metadata* native_ptr = *buffered_field_addr;
    guarantee(native_ptr != nullptr, "sanity");

    if (RegeneratedClasses::has_been_regenerated(native_ptr)) {
      native_ptr = RegeneratedClasses::get_regenerated_object(native_ptr);
    }

    guarantee(ArchiveBuilder::current()->has_been_archived((address)native_ptr),
              "Metadata %p should have been archived", native_ptr);

    address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
    address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
    *buffered_field_addr = (Metadata*)requested_native_ptr;
  }

  heap_info->ptrmap()->resize(max_idx + 1);
  log_info(aot, heap)("calculate_ptrmap: marked %d non-null native pointers for heap region (%zu bits)",
                      num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}

AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHeapInfo* heap_info) {
  class MappedWriterOopIterator : public AOTMapLogger::OopDataIterator {
  private:
    address _current;
    address _next;

    address _buffer_start;
    address _buffer_end;
    uint64_t _buffer_start_narrow_oop;
    intptr_t _buffer_to_requested_delta;
    int _requested_shift;

    size_t _num_root_segments;
    size_t _num_obj_arrays_logged;

  public:
    MappedWriterOopIterator(address buffer_start,
                            address buffer_end,
                            uint64_t buffer_start_narrow_oop,
                            intptr_t buffer_to_requested_delta,
                            int requested_shift,
                            size_t num_root_segments)
      : _current(nullptr),
        _next(buffer_start),
        _buffer_start(buffer_start),
        _buffer_end(buffer_end),
        _buffer_start_narrow_oop(buffer_start_narrow_oop),
        _buffer_to_requested_delta(buffer_to_requested_delta),
        _requested_shift(requested_shift),
        _num_root_segments(num_root_segments),
        _num_obj_arrays_logged(0) {
    }

    AOTMapLogger::OopData capture(address buffered_addr) {
      oopDesc* raw_oop = (oopDesc*)buffered_addr;
      size_t size = size_of_buffered_oop(buffered_addr);
      address requested_addr = buffered_addr_to_requested_addr(buffered_addr);
      intptr_t target_location = (intptr_t)requested_addr;
      uint64_t pd = (uint64_t)(pointer_delta(buffered_addr, _buffer_start, 1));
      uint32_t narrow_location = checked_cast<uint32_t>(_buffer_start_narrow_oop + (pd >> _requested_shift));
      Klass* klass = real_klass_of_buffered_oop(buffered_addr);

      return { buffered_addr,
               requested_addr,
               target_location,
               narrow_location,
               raw_oop,
               klass,
               size,
               false };
    }

    bool has_next() override {
      return _next < _buffer_end;
    }

    AOTMapLogger::OopData next() override {
      _current = _next;
      AOTMapLogger::OopData result = capture(_current);
      if (result._klass->is_objArray_klass()) {
        result._is_root_segment = _num_obj_arrays_logged++ < _num_root_segments;
      }
      _next = _current + result._size * BytesPerWord;
      return result;
    }

    AOTMapLogger::OopData obj_at(narrowOop* addr) override {
      uint64_t n = (uint64_t)(*addr);
      if (n == 0) {
        return null_data();
      } else {
        precond(n >= _buffer_start_narrow_oop);
        address buffer_addr = _buffer_start + ((n - _buffer_start_narrow_oop) << _requested_shift);
        return capture(buffer_addr);
      }
    }

    AOTMapLogger::OopData obj_at(oop* addr) override {
      address requested_value = cast_from_oop<address>(*addr);
      if (requested_value == nullptr) {
        return null_data();
      } else {
        address buffer_addr = requested_value - _buffer_to_requested_delta;
        return capture(buffer_addr);
      }
    }

    GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
      return new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();
    }
  };

  MemRegion r = heap_info->buffer_region();
  address buffer_start = address(r.start());
  address buffer_end = address(r.end());

  address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
  address requested_start = UseCompressedOops ? buffered_addr_to_requested_addr(buffer_start) : requested_base;
  int requested_shift = CompressedOops::shift();
  intptr_t buffer_to_requested_delta = requested_start - buffer_start;
  uint64_t buffer_start_narrow_oop = 0xdeadbeed;
  if (UseCompressedOops) {
    buffer_start_narrow_oop = (uint64_t)(pointer_delta(requested_start, requested_base, 1)) >> requested_shift;
    assert(buffer_start_narrow_oop < 0xffffffff, "sanity");
  }

  return new MappedWriterOopIterator(buffer_start,
                                     buffer_end,
                                     buffer_start_narrow_oop,
                                     buffer_to_requested_delta,
                                     requested_shift,
                                     heap_info->root_segments().count());
}

#endif // INCLUDE_CDS_JAVA_HEAP