/*
 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotReferenceObjSupport.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/modules.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t ArchiveHeapWriter::_buffer_used;

// Heap root segments
HeapRootSegments ArchiveHeapWriter::_heap_root_segments;

address ArchiveHeapWriter::_requested_bottom;
address ArchiveHeapWriter::_requested_top;

static size_t _num_strings = 0;
static size_t _string_bytes = 0;
static size_t _num_packages = 0;
static size_t _num_protection_domains = 0;

GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
GrowableArrayCHeap<ArchiveHeapWriter::HeapObjOrder, mtClassShared>* ArchiveHeapWriter::_source_objs_order;

ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
  ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;


typedef ResourceHashtable<
      size_t,    // offset of a filler from ArchiveHeapWriter::buffer_bottom()
      size_t,    // size of this filler (in bytes)
      127,       // prime number
      AnyObj::C_HEAP,
      mtClassShared> FillersTable;
static FillersTable* _fillers;
static int _num_native_ptrs = 0;

void ArchiveHeapWriter::init() {
  if (CDSConfig::is_dumping_heap()) {
    Universe::heap()->collect(GCCause::_java_lang_system_gc);

    _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
    _fillers = new FillersTable();
    _requested_bottom = nullptr;
    _requested_top = nullptr;

    _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);

    guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
  }
}

void ArchiveHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

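// Overview of the write pipeline (a summary of the four calls in write() below):
//   1. allocate_buffer()            -- set up the in-memory output buffer
//   2. copy_source_objs_to_buffer() -- copy the root segments, then all source objects
//   3. set_requested_address()      -- pick the "requested" address range that the
//                                      archived heap is written against
//   4. relocate_embedded_oops()     -- rewrite embedded oops (and record native
//                                      pointers) relative to the requested range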
void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                              ArchiveHeapInfo* heap_info) {
  assert(CDSConfig::is_dumping_heap(), "sanity");
  allocate_buffer();
  copy_source_objs_to_buffer(roots);
  set_requested_address(heap_info);
  relocate_embedded_oops(roots, heap_info);
}

bool ArchiveHeapWriter::is_too_large_to_archive(oop o) {
  return is_too_large_to_archive(o->size());
}

bool ArchiveHeapWriter::is_string_too_large_to_archive(oop string) {
  typeArrayOop value = java_lang_String::value_no_keepalive(string);
  return is_too_large_to_archive(value);
}

bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) {
  assert(size > 0, "no zero-size object");
  assert(size * HeapWordSize > size, "no overflow");
  static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");

  size_t byte_size = size * HeapWordSize;
  if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
    return true;
  } else {
    return false;
  }
}
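
// Worked example for the check above (assuming MIN_GC_REGION_ALIGNMENT is 1*M,
// as declared in archiveHeapWriter.hpp at the time of writing; treat the numbers
// as illustrative): on 64-bit, an object of 131072 words (131072 * 8 == 1M bytes)
// is still archivable, while one of 131073 words (1M + 8 bytes) is rejected,
// since no buffered object may span a MIN_GC_REGION_ALIGNMENT boundary.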

// Various lookup functions between source_obj, buffered_obj and requested_obj
bool ArchiveHeapWriter::is_in_requested_range(oop o) {
  assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
  address a = cast_from_oop<address>(o);
  return (_requested_bottom <= a && a < _requested_top);
}

oop ArchiveHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
  oop req_obj = cast_to_oop(_requested_bottom + offset);
  assert(is_in_requested_range(req_obj), "must be");
  return req_obj;
}

oop ArchiveHeapWriter::source_obj_to_requested_obj(oop src_obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
  if (p != nullptr) {
    return requested_obj_from_buffer_offset(p->buffer_offset());
  } else {
    return nullptr;
  }
}

oop ArchiveHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
  oop* p = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    return *p;
  } else {
    return nullptr;
  }
}

address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
  return _requested_bottom + buffered_address_to_offset(buffered_addr);
}

address ArchiveHeapWriter::requested_address() {
  assert(_buffer != nullptr, "must be initialized");
  return _requested_bottom;
}

void ArchiveHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

objArrayOop ArchiveHeapWriter::allocate_root_segment(size_t offset, int element_count) {
  HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
  memset(mem, 0, objArrayOopDesc::object_size(element_count) * HeapWordSize); // object_size() is in HeapWords

  // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
  }
  arrayOopDesc::set_length(mem, element_count);
  return objArrayOop(cast_to_oop(mem));
}

void ArchiveHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
  // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
  if (UseCompressedOops) {
    *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
  } else {
    *segment->obj_at_addr<oop>(index) = root;
  }
}

void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // Depending on the number of classes we are archiving, a single roots array may be
  // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
  // allows us to chop the large array into a series of "segments". The current layout
  // starts with zero or more segments that exactly fit MIN_GC_REGION_ALIGNMENT, and ends
  // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
  // This is simple and efficient. We do not need filler objects anywhere between the segments,
  // or immediately after the last segment. This allows starting the object dump immediately
  // after the roots.

  assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
         "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);

  int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
  assert(objArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
         "Should match exactly");

  HeapRootSegments segments(_buffer_used,
                            roots->length(),
                            MIN_GC_REGION_ALIGNMENT,
                            max_elem_count);

  int root_index = 0;
  for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
    int size_elems = segments.size_in_elems(seg_idx);
    size_t size_bytes = segments.size_in_bytes(seg_idx);

    size_t oop_offset = _buffer_used;
    _buffer_used = oop_offset + size_bytes;
    ensure_buffer_space(_buffer_used);

    assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
           "Roots segment %zu start is not aligned: %zu",
           seg_idx, oop_offset);

    objArrayOop seg_oop = allocate_root_segment(oop_offset, size_elems);
    for (int i = 0; i < size_elems; i++) {
      root_segment_at_put(seg_oop, i, roots->at(root_index++));
    }

    log_info(cds, heap)("archived obj root segment [%d] = %zu bytes, obj = " PTR_FORMAT,
                        size_elems, size_bytes, p2i(seg_oop));
  }

  assert(root_index == roots->length(), "Post-condition: All roots are handled");

  _heap_root_segments = segments;
}
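
// Worked example of the segmentation in copy_roots_to_buffer(), assuming
// UseCompressedOops (4-byte heapOopSize), a 16-byte objArray header, and
// MIN_GC_REGION_ALIGNMENT == 1*M (illustrative values only):
//   max_elem_count = (1M - 16) / 4 = 262140
// With 300000 roots, we would write two segments: one full segment of 262140
// elements occupying exactly 1M bytes, followed by a final segment of 37860
// elements that is smaller than MIN_GC_REGION_ALIGNMENT.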

// The goal is to sort the objects in increasing order of:
// - objects that have only oop pointers
// - objects that have both native and oop pointers
// - objects that have only native pointers
// - objects that have no pointers
static int oop_sorting_rank(oop o) {
  bool has_oop_ptr, has_native_ptr;
  HeapShared::get_pointer_info(o, has_oop_ptr, has_native_ptr);

  if (has_oop_ptr) {
    if (!has_native_ptr) {
      return 0;
    } else {
      return 1;
    }
  } else {
    if (has_native_ptr) {
      return 2;
    } else {
      return 3;
    }
  }
}
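
// For reference, the resulting ranks as a truth table:
//   (has_oop_ptr, has_native_ptr) == (true,  false) -> 0
//   (has_oop_ptr, has_native_ptr) == (true,  true ) -> 1
//   (has_oop_ptr, has_native_ptr) == (false, true ) -> 2
//   (has_oop_ptr, has_native_ptr) == (false, false) -> 3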

int ArchiveHeapWriter::compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b) {
  int rank_a = a->_rank;
  int rank_b = b->_rank;

  if (rank_a != rank_b) {
    return rank_a - rank_b;
  } else {
    // If they are the same rank, sort them by their position in the _source_objs array
    return a->_index - b->_index;
  }
}

void ArchiveHeapWriter::sort_source_objs() {
  log_info(cds)("sorting heap objects");
  int len = _source_objs->length();
  _source_objs_order = new GrowableArrayCHeap<HeapObjOrder, mtClassShared>(len);

  for (int i = 0; i < len; i++) {
    oop o = _source_objs->at(i);
    int rank = oop_sorting_rank(o);
    HeapObjOrder os = {i, rank};
    _source_objs_order->append(os);
  }
  log_info(cds)("computed ranks");
  _source_objs_order->sort(compare_objs_by_oop_fields);
  log_info(cds)("sorting heap objects done");
}

void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // There could be multiple root segments, which we want to be aligned by region.
  // Putting them ahead of objects makes sure we waste no space.
  copy_roots_to_buffer(roots);

  sort_source_objs();
  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);
    assert(buffer_offset <= 0x7fffffff, "sanity");
    HeapShared::add_to_permanent_oop_table(src_obj, (int)buffer_offset);

    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, src_obj);
    _buffer_offset_to_source_obj_table->maybe_grow();

    if (java_lang_Module::is_instance(src_obj)) {
      Modules::check_archived_module_oop(src_obj);
    }
  }

  log_info(cds)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
                _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
  log_info(cds)("   strings            = %8zu (%zu bytes)", _num_strings, _string_bytes);
  log_info(cds)("   packages           = %8zu", _num_packages);
  log_info(cds)("   protection domains = %8zu", _num_protection_domains);
}

size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}

int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}
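
// Worked example for filler_array_length(), assuming UseCompressedOops (4-byte
// elements) and a 16-byte objArray header (illustrative values only): for
// fill_bytes == 256, the search starts at initial_length == 256 / 4 == 64 and
// counts down until filler_array_byte_size(60) == 16 + 60 * 4 == 256, so the
// function returns 60.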

HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
  return mem;
}

void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space of min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no object spans across a MIN_GC_REGION_ALIGNMENT boundary. This
    // way we can map the region in any region-based collector.
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
           "no buffered object can be larger than %d bytes", MIN_GC_REGION_ALIGNMENT);

    const size_t filler_end = next_min_region_bottom;
    const size_t fill_bytes = filler_end - _buffer_used;
    assert(fill_bytes > 0, "must be");
    ensure_buffer_space(filler_end);

    int array_length = filler_array_length(fill_bytes);
    log_info(cds, heap)("Inserting filler obj array of %d elements (%zu bytes total) @ buffer offset %zu",
                        array_length, fill_bytes, _buffer_used);
    HeapWord* filler = init_filler_array_at_buffer_top(array_length, fill_bytes);
    _buffer_used = filler_end;
    _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
  }
}

size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

void ArchiveHeapWriter::update_stats(oop src_obj) {
  if (java_lang_String::is_instance(src_obj)) {
    _num_strings ++;
    _string_bytes += src_obj->size() * HeapWordSize;
    _string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
  } else {
    Klass* k = src_obj->klass();
    Symbol* name = k->name();
    if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
      _num_packages ++;
    } else if (name->equals("java/security/ProtectionDomain")) {
      _num_protection_domains ++;
    }
  }
}

size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  update_stats(src_obj);

  assert(!is_too_large_to_archive(src_obj), "already checked");
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that we don't have an object that can possibly
  // span across two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, byte_size);

  // These native pointers will be restored explicitly at run time.
  if (java_lang_Module::is_instance(src_obj)) {
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) {
  assert(!info->is_used(), "only set once");

  size_t heap_region_byte_size = _buffer_used;
  assert(heap_region_byte_size > 0, "must archive at least one object!");

  if (UseCompressedOops) {
    if (UseG1GC) {
      address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
      log_info(cds, heap)("Heap end = %p", heap_end);
      _requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
      _requested_bottom = align_down(_requested_bottom, MIN_GC_REGION_ALIGNMENT);
      assert(is_aligned(_requested_bottom, G1HeapRegion::GrainBytes), "sanity");
    } else {
      _requested_bottom = align_up(CompressedOops::begin(), MIN_GC_REGION_ALIGNMENT);
    }
  } else {
    // We always write the objects as if the heap started at this address. This
    // makes the contents of the archive heap deterministic.
    //
    // Note that at runtime, the heap address is selected by the OS, so the archive
    // heap will not be mapped at 0x10000000, and the contents need to be patched.
    _requested_bottom = align_up((address)NOCOOPS_REQUESTED_BASE, MIN_GC_REGION_ALIGNMENT);
  }

  assert(is_aligned(_requested_bottom, MIN_GC_REGION_ALIGNMENT), "sanity");

  _requested_top = _requested_bottom + _buffer_used;

  info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
                                    offset_to_buffered_address<HeapWord*>(_buffer_used)));
  info->set_heap_root_segments(_heap_root_segments);
}
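
// Illustrative example for set_requested_address() (all addresses are made up):
// with UseCompressedOops and G1, if the reserved heap ends at 0x800000000 and
// _buffer_used is 5M, then
//   _requested_bottom = align_down(0x800000000 - 5M, G1HeapRegion::GrainBytes)
//   _requested_top    = _requested_bottom + 5M
// i.e. the archived objects are addressed as if they sat at the top of the
// runtime heap.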

// Oop relocation

template <typename T> T* ArchiveHeapWriter::requested_addr_to_buffered_addr(T* p) {
  assert(is_in_requested_range(cast_to_oop(p)), "must be");

  address addr = address(p);
  assert(addr >= _requested_bottom, "must be");
  size_t offset = addr - _requested_bottom;
  return offset_to_buffered_address<T*>(offset);
}

template <typename T> oop ArchiveHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
  oop o = load_oop_from_buffer(buffered_addr);
  assert(!in_buffer(cast_from_oop<address>(o)), "must point to source oop");
  return o;
}

template <typename T> void ArchiveHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
                                                                            oop request_oop) {
  assert(is_in_requested_range(request_oop), "must be");
  store_oop_in_buffer(buffered_addr, request_oop);
}

inline void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
  *buffered_addr = requested_obj;
}

inline void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
  narrowOop val = CompressedOops::encode_not_null(requested_obj);
  *buffered_addr = val;
}

oop ArchiveHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
  return *buffered_addr;
}

oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
  return CompressedOops::decode(*buffered_addr);
}

template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap) {
  oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
  if (source_referent != nullptr) {
    if (java_lang_Class::is_instance(source_referent)) {
      // When the source object points to a "real" mirror, the buffered object should point
      // to the "scratch" mirror, which has all unarchivable fields scrubbed (to be reinstated
      // at run time).
      source_referent = HeapShared::scratch_java_mirror(source_referent);
      assert(source_referent != nullptr, "must be");
    }
    oop request_referent = source_obj_to_requested_obj(source_referent);
    store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
    mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
  }
}

template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
  address requested_region_bottom;

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p <  (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}
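
// Example of the index math above, assuming UseCompressedOops: each oopmap bit
// covers one narrowOop slot (4 bytes), so a field whose requested address is
// _requested_bottom + 40 is recorded at bit index 40 / sizeof(narrowOop) == 10.
// Without compressed oops the unit is sizeof(oop) (8 bytes on 64-bit) instead.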

void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
  } else {
    fake_oop->set_narrow_klass(nk);
  }

  if (src_obj == nullptr) {
    return;
  }
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!src_obj->fast_no_hash_check()) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
  // Strip age bits.
  fake_oop->set_mark(fake_oop->mark().set_age(0));
}

class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;
  bool _is_java_lang_ref;
public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
    _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
  {
    _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(src_obj);
  }

  void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }

private:
  template <class T> void do_oop_work(T *p) {
    int field_offset = pointer_delta_as_int((char*)p, cast_from_oop<char*>(_src_obj));
    T* field_addr = (T*)(_buffered_obj + field_offset);
    if (_is_java_lang_ref && AOTReferenceObjSupport::skip_field(field_offset)) {
      // Do not copy these fields. Set them to null.
      *field_addr = (T)0x0;
    } else {
      ArchiveHeapWriter::relocate_field_in_buffer<T>(field_addr, _oopmap);
    }
  }
};

static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
  // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
  size_t start = bitmap->find_first_set_bit(0);
  size_t end = bitmap->size();
  log_info(cds)("%s = %7zu ... %7zu (%3zu%% ... %3zu%% = %3zu%%)", which,
                start, end,
                start * 100 / total_bits,
                end * 100 / total_bits,
                (end - start) * 100 / total_bits);
}

// Update all oop fields embedded in the buffered objects
void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                               ArchiveHeapInfo* heap_info) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);

  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
    assert(info != nullptr, "must be");
    oop requested_obj = requested_obj_from_buffer_offset(info->buffer_offset());
    update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
    address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
    EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
    src_obj->oop_iterate(&relocator);
  }

  // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
  // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
  for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
    size_t seg_offset = _heap_root_segments.segment_offset(seg_idx);

    objArrayOop requested_obj = (objArrayOop)requested_obj_from_buffer_offset(seg_offset);
    update_header_for_requested_obj(requested_obj, nullptr, Universe::objectArrayKlass());
    address buffered_obj = offset_to_buffered_address<address>(seg_offset);
    int length = _heap_root_segments.size_in_elems(seg_idx);

    if (UseCompressedOops) {
      for (int i = 0; i < length; i++) {
        narrowOop* addr = (narrowOop*)(buffered_obj + objArrayOopDesc::obj_at_offset<narrowOop>(i));
        relocate_field_in_buffer<narrowOop>(addr, heap_info->oopmap());
      }
    } else {
      for (int i = 0; i < length; i++) {
        oop* addr = (oop*)(buffered_obj + objArrayOopDesc::obj_at_offset<oop>(i));
        relocate_field_in_buffer<oop>(addr, heap_info->oopmap());
      }
    }
  }

  compute_ptrmap(heap_info);

  size_t total_bytes = (size_t)_buffer->length();
  log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
  log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
}

void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
  Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
  if (ptr != nullptr) {
    NativePointerInfo info;
    info._src_obj = src_obj;
    info._field_offset = field_offset;
    _native_pointers->append(info);
    HeapShared::set_has_native_pointers(src_obj);
    _num_native_ptrs ++;
  }
}

// Do we have a jlong/jint field that's actually a pointer to a MetaspaceObj?
bool ArchiveHeapWriter::is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset) {
  HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
  assert(p != nullptr, "must be");

  // requested_field_addr = the address of this field in the requested space
  oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
  Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
  assert((Metadata**)_requested_bottom <= requested_field_addr && requested_field_addr < (Metadata**)_requested_top, "range check");

  BitMap::idx_t idx = requested_field_addr - (Metadata**)_requested_bottom;
  // Leading zeros have been removed so some addresses may not be in the ptrmap
  size_t start_pos = FileMapInfo::current_info()->heap_ptrmap_start_pos();
  if (idx < start_pos) {
    return false;
  } else {
    idx -= start_pos;
  }
  return (idx < heap_info->ptrmap()->size()) && (heap_info->ptrmap()->at(idx) == true);
}
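
// Example of the start_pos adjustment above (illustrative numbers): if the
// ptrmap was written with its first 100 (zero) bits stripped, so that
// heap_ptrmap_start_pos() == 100, then a field at raw bit index 150 is looked
// up at index 150 - 100 == 50, and any field with a raw index below 100 is by
// construction not a marked native pointer.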

void ArchiveHeapWriter::compute_ptrmap(ArchiveHeapInfo* heap_info) {
  int num_non_null_ptrs = 0;
  Metadata** bottom = (Metadata**) _requested_bottom;
  Metadata** top = (Metadata**) _requested_top; // exclusive
  heap_info->ptrmap()->resize(top - bottom);

  BitMap::idx_t max_idx = 32; // paranoid - don't make it too small
  for (int i = 0; i < _native_pointers->length(); i++) {
    NativePointerInfo info = _native_pointers->at(i);
    oop src_obj = info._src_obj;
    int field_offset = info._field_offset;
    HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
    // requested_field_addr = the address of this field in the requested space
    oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
    Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
    assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");

    // Mark this field in the bitmap
    BitMap::idx_t idx = requested_field_addr - bottom;
    heap_info->ptrmap()->set_bit(idx);
    num_non_null_ptrs ++;
    max_idx = MAX2(max_idx, idx);

    // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
    // this address if the RO/RW regions are mapped at the default location).

    Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
    Metadata* native_ptr = *buffered_field_addr;
    guarantee(native_ptr != nullptr, "sanity");
    guarantee(ArchiveBuilder::current()->has_been_buffered((address)native_ptr),
              "Metadata %p should have been archived", native_ptr);

    if (RegeneratedClasses::has_been_regenerated((address)native_ptr)) {
      native_ptr = (Metadata*)RegeneratedClasses::get_regenerated_object((address)native_ptr);
    }

    address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
    address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
    *buffered_field_addr = (Metadata*)requested_native_ptr;
  }

  heap_info->ptrmap()->resize(max_idx + 1);
  log_info(cds, heap)("compute_ptrmap: marked %d non-null native pointers for heap region (%zu bits)",
                      num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}
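
// ptrmap recap: one bit per Metadata* slot (8 bytes on 64-bit) over the range
// [_requested_bottom, _requested_top). A set bit at index i means the word at
// _requested_bottom + i * sizeof(Metadata*) holds a native pointer that must be
// patched to its actual address at run time. The final resize(max_idx + 1)
// trims unused trailing bits; leading zero bits may be stripped separately when
// the bitmap is written out (compare the heap_ptrmap_start_pos() usage in
// is_marked_as_native_pointer() above).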

#endif // INCLUDE_CDS_JAVA_HEAP