/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotReferenceObjSupport.hpp"
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* AOTStreamedHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t AOTStreamedHeapWriter::_buffer_used;
size_t AOTStreamedHeapWriter::_roots_offset;
size_t AOTStreamedHeapWriter::_forwarding_offset;
size_t AOTStreamedHeapWriter::_root_highest_object_index_table_offset;

GrowableArrayCHeap<oop, mtClassShared>* AOTStreamedHeapWriter::_source_objs;

AOTStreamedHeapWriter::BufferOffsetToSourceObjectTable* AOTStreamedHeapWriter::_buffer_offset_to_source_obj_table;
AOTStreamedHeapWriter::SourceObjectToDFSOrderTable* AOTStreamedHeapWriter::_dfs_order_table;

int* AOTStreamedHeapWriter::_roots_highest_dfs;
size_t* AOTStreamedHeapWriter::_dfs_to_archive_object_table;

static const int max_table_capacity = 0x3fffffff;

void AOTStreamedHeapWriter::init() {
  if (CDSConfig::is_dumping_heap()) {
    _buffer_offset_to_source_obj_table = new (mtClassShared) BufferOffsetToSourceObjectTable(8, max_table_capacity);

    int initial_source_objs_capacity = 10000;
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(initial_source_objs_capacity);
  }
}

void AOTStreamedHeapWriter::delete_tables_with_raw_oops() {
  delete _source_objs;
  _source_objs = nullptr;

  delete _dfs_order_table;
  _dfs_order_table = nullptr;
}

void AOTStreamedHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

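// Pushes the non-null referents of _src_obj onto the DFS stack. For java.lang.ref objects,
// the referent may first be remapped via HeapShared::maybe_remap_referent().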
class FollowOopIterateClosure: public BasicOopIterateClosure {
  Stack<oop, mtClassShared>* _dfs_stack;
  oop _src_obj;
  bool _is_java_lang_ref;

public:
  FollowOopIterateClosure(Stack<oop, mtClassShared>* dfs_stack, oop src_obj, bool is_java_lang_ref) :
    _dfs_stack(dfs_stack),
    _src_obj(src_obj),
    _is_java_lang_ref(is_java_lang_ref) {}

  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

private:
  template <class T> void do_oop_work(T *p) {
    size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
    oop obj = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, HeapAccess<>::oop_load(p));
    if (obj != nullptr) {
      _dfs_stack->push(obj);
    }
  }
};

int AOTStreamedHeapWriter::cmp_dfs_order(oop* o1, oop* o2) {
  int* o1_dfs = _dfs_order_table->get(*o1);
  int* o2_dfs = _dfs_order_table->get(*o2);
  return *o1_dfs - *o2_dfs;
}

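// Assign each archived object a DFS number (1-based; 0 is reserved for null) by traversing the
// object graph from the roots. For each root, remember the highest DFS number assigned once its
// subgraph has been traversed (_roots_highest_dfs), then sort _source_objs by DFS order so the
// objects are written to the buffer in traversal order.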
void AOTStreamedHeapWriter::order_source_objs(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  Stack<oop, mtClassShared> dfs_stack;
  _dfs_order_table = new (mtClassShared) SourceObjectToDFSOrderTable(8, max_table_capacity);
  _roots_highest_dfs = NEW_C_HEAP_ARRAY(int, (size_t)roots->length(), mtClassShared);
  _dfs_to_archive_object_table = NEW_C_HEAP_ARRAY(size_t, (size_t)_source_objs->length() + 1, mtClassShared);

  for (int i = 0; i < _source_objs->length(); ++i) {
    oop obj = _source_objs->at(i);
    _dfs_order_table->put(cast_from_oop<void*>(obj), -1);
    _dfs_order_table->maybe_grow();
  }

  int dfs_order = 0;

  for (int i = 0; i < roots->length(); ++i) {
    oop root = roots->at(i);

    if (root == nullptr) {
      log_info(aot, heap)("null root at %d", i);
      continue;
    }

    dfs_stack.push(root);

    while (!dfs_stack.is_empty()) {
      oop obj = dfs_stack.pop();
      assert(obj != nullptr, "objects on the DFS stack must not be null");
      int* dfs_number = _dfs_order_table->get(cast_from_oop<void*>(obj));
      if (*dfs_number != -1) {
        // Already visited in the traversal
        continue;
      }
      _dfs_order_table->put(cast_from_oop<void*>(obj), ++dfs_order);
      _dfs_order_table->maybe_grow();

      FollowOopIterateClosure cl(&dfs_stack, obj, AOTReferenceObjSupport::check_if_ref_obj(obj));
      obj->oop_iterate(&cl);
    }

    _roots_highest_dfs[i] = dfs_order;
  }

  _source_objs->sort(cmp_dfs_order);
}

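// Top-level entry point: order the source objects, copy them (followed by the root, forwarding,
// and per-root highest-DFS tables) into the buffer, rewrite embedded oops as DFS indices, and
// record the results in the ArchiveStreamedHeapInfo.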
void AOTStreamedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                  ArchiveStreamedHeapInfo* heap_info) {
  assert(CDSConfig::is_dumping_heap(), "sanity");
  allocate_buffer();
  order_source_objs(roots);
  copy_source_objs_to_buffer(roots);
  map_embedded_oops(heap_info);
  populate_archive_heap_info(heap_info);
}

void AOTStreamedHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

void AOTStreamedHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

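// Root table layout: an int count followed by one int DFS index per root (0 for null roots),
// padded up to a HeapWord boundary.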
void AOTStreamedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  int length = roots->length();
  size_t byte_size = align_up(sizeof(int) + sizeof(int) * (size_t)length, (size_t)HeapWordSize);

  size_t new_used = _buffer_used + byte_size;
  ensure_buffer_space(new_used);

  int* mem = offset_to_buffered_address<int*>(_buffer_used);
  memset(mem, 0, byte_size);
  *mem = length;

  for (int i = 0; i < length; i++) {
    // The root table is raw buffer memory outside the real heap, so store the DFS index
    // directly instead of going through a heap array store such as obj_at_put().
    oop o = roots->at(i);
    int dfs_index = o == nullptr ? 0 : *_dfs_order_table->get(cast_from_oop<void*>(o));
    mem[i + 1] = dfs_index;
  }
  log_info(aot, heap)("archived obj roots[%d] = %zu bytes, mem = %p", length, byte_size, mem);

  _roots_offset = _buffer_used;
  _buffer_used = new_used;
}

template <typename T>
void AOTStreamedHeapWriter::write(T value) {
  size_t new_used = _buffer_used + sizeof(T);
  ensure_buffer_space(new_used);
  T* mem = offset_to_buffered_address<T*>(_buffer_used);
  *mem = value;
  _buffer_used = new_used;
}

void AOTStreamedHeapWriter::copy_forwarding_to_buffer() {
  _forwarding_offset = _buffer_used;

  write<size_t>(0); // The first entry is the null entry

  // Write a mapping from object index to buffer offset
  for (int i = 1; i <= _source_objs->length(); i++) {
    size_t buffer_offset = _dfs_to_archive_object_table[i];
    write(buffer_offset);
  }
}

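// Write, for each root, the highest DFS index assigned once its subgraph had been traversed.
// An extra -1 entry keeps the table 64-bit aligned when the number of roots is odd.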
void AOTStreamedHeapWriter::copy_roots_max_dfs_to_buffer(int roots_length) {
  _root_highest_object_index_table_offset = _buffer_used;

  for (int i = 0; i < roots_length; ++i) {
    int highest_dfs = _roots_highest_dfs[i];
    write(highest_dfs);
  }

  if ((roots_length % 2) != 0) {
    write(-1); // Align up to a 64-bit word
  }
}

static bool is_interned_string(oop obj) {
  if (!java_lang_String::is_instance(obj)) {
    return false;
  }

  ResourceMark rm;
  int len;
  jchar* name = java_lang_String::as_unicode_string_or_null(obj, len);
  if (name == nullptr) {
    fatal("Insufficient memory for dumping");
  }
  return StringTable::lookup(name, len) == obj;
}

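// The oopmap has one bit per potential oop location in the buffer: one per narrowOop with
// compressed oops, otherwise one per HeapWord.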
static BitMap::idx_t bit_idx_for_buffer_offset(size_t buffer_offset) {
  if (UseCompressedOops) {
    return BitMap::idx_t(buffer_offset / sizeof(narrowOop));
  } else {
    return BitMap::idx_t(buffer_offset / sizeof(HeapWord));
  }
}

bool AOTStreamedHeapWriter::is_dumped_interned_string(oop obj) {
  return is_interned_string(obj) && HeapShared::get_cached_oop_info(obj) != nullptr;
}

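// Copy all source objects into the buffer in DFS order, recording each object's buffer offset
// both in its CachedOopInfo and in the DFS-index -> buffer-offset forwarding table, then append
// the root, forwarding, and per-root highest-DFS tables.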
void AOTStreamedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  for (int i = 0; i < _source_objs->length(); i++) {
    oop src_obj = _source_objs->at(i);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);

    OopHandle handle(Universe::vm_global(), src_obj);
    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
    _buffer_offset_to_source_obj_table->maybe_grow();

    int dfs_order = i + 1;
    _dfs_to_archive_object_table[dfs_order] = buffer_offset;
  }

  copy_roots_to_buffer(roots);
  copy_forwarding_to_buffer();
  copy_roots_max_dfs_to_buffer(roots->length());

  log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots",
                _buffer_used, _source_objs->length() + 1, roots->length());
}

template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

static bool needs_explicit_size(oop src_obj) {
  Klass* klass = src_obj->klass();
  int lh = klass->layout_helper();

  // Simple instances or arrays don't need explicit size
  if (Klass::layout_helper_is_instance(lh)) {
    return Klass::layout_helper_needs_slow_path(lh);
  }

  return !Klass::layout_helper_is_array(lh);
}

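// Copy a single object into the buffer and return the offset of its buffered copy. Objects
// whose size cannot be derived from the klass layout helper alone are preceded by an explicit
// size word.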
size_t AOTStreamedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  if (needs_explicit_size(src_obj)) {
    // Explicitly write the object size for more complex objects, so the loader does not have
    // to treat the buffered bytes as real objects just to compute their size. Most of the time,
    // the layout helper of the class is enough.
    write<size_t>(src_obj->size());
  }
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  ensure_buffer_space(new_used);

  if (is_interned_string(src_obj)) {
    java_lang_String::hash_code(src_obj);                   // Sets the hash code field(s)
    java_lang_String::set_deduplication_forbidden(src_obj); // Allows faster interning at runtime
    assert(java_lang_String::hash_is_set(src_obj), "hash must be set");
  }

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, byte_size);

  if (java_lang_Module::is_instance(src_obj)) {
    // These native pointers will be restored explicitly at run time.
    Modules::check_archived_module_oop(src_obj);
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

// Oop mapping

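// Embedded oop fields in the buffered objects do not hold addresses. Instead, each field stores
// the DFS index of the object it refers to (0 for null), and the field's location is recorded
// in the oopmap bitmap.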
inline void AOTStreamedHeapWriter::store_oop_in_buffer(oop* buffered_addr, int dfs_index) {
  *(ssize_t*)buffered_addr = dfs_index;
}

inline void AOTStreamedHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, int dfs_index) {
  *(int32_t*)buffered_addr = (int32_t)dfs_index;
}

template <typename T> void AOTStreamedHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  // Mark the pointer in the oopmap
  size_t buffered_offset = buffered_address_to_offset((address)buffered_addr);
  BitMap::idx_t idx = bit_idx_for_buffer_offset(buffered_offset);
  oopmap->set_bit(idx);
}

template <typename T> void AOTStreamedHeapWriter::map_oop_field_in_buffer(oop obj, T* field_addr_in_buffer, CHeapBitMap* oopmap) {
  if (obj == nullptr) {
    store_oop_in_buffer(field_addr_in_buffer, 0);
  } else {
    int dfs_index = *_dfs_order_table->get(obj);
    store_oop_in_buffer(field_addr_in_buffer, dfs_index);
  }

  mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
}

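// Build the header of a buffered object: the prototype mark word with the source object's
// identity hash, the marked bit for interned strings, and the requested narrowKlass (stored in
// the mark word with compact object headers, otherwise in the narrow klass field).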
void AOTStreamedHeapWriter::update_header_for_buffered_addr(address buffered_addr, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);

  markWord mw = markWord::prototype();
  oopDesc* fake_oop = (oopDesc*)buffered_addr;

  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap. This also has the side effect of pre-initializing the
  // identity_hash for all shared objects, so they are less likely to be written
  // into during run time, increasing the potential of memory sharing.
  if (src_obj != nullptr) {
    intptr_t src_hash = src_obj->identity_hash();
    mw = mw.copy_set_hash(src_hash);
  }

  if (is_interned_string(src_obj)) {
    // Mark the mark word of interned strings so the loader knows to link them to
    // the string table at runtime.
    mw = mw.set_marked();
  }

  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(mw.set_narrow_klass(nk));
  } else {
    fake_oop->set_mark(mw);
    fake_oop->set_narrow_klass(nk);
  }
}

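// Visits every oop field of a source object and rewrites the corresponding field in its
// buffered copy via map_oop_field_in_buffer().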
class AOTStreamedHeapWriter::EmbeddedOopMapper: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;
  bool _is_java_lang_ref;

public:
  EmbeddedOopMapper(oop src_obj, address buffered_obj, CHeapBitMap* oopmap)
    : _src_obj(src_obj),
      _buffered_obj(buffered_obj),
      _oopmap(oopmap),
      _is_java_lang_ref(AOTReferenceObjSupport::check_if_ref_obj(src_obj)) {}

  void do_oop(narrowOop *p) { EmbeddedOopMapper::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopMapper::do_oop_work(p); }

private:
  template <typename T>
  void do_oop_work(T *p) {
    size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
    oop obj = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, HeapAccess<>::oop_load(p));
    AOTStreamedHeapWriter::map_oop_field_in_buffer<T>(obj, (T*)(_buffered_obj + field_offset), _oopmap);
  }
};

static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
  // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
  size_t start = bitmap->find_first_set_bit(0);
  size_t end = bitmap->size();
  log_info(aot)("%s = %7zu ... %7zu (%3zu%% ... %3zu%% = %3zu%%)", which,
                start, end,
                start * 100 / total_bits,
                end * 100 / total_bits,
                (end - start) * 100 / total_bits);
}

// Update all oop fields embedded in the buffered objects
void AOTStreamedHeapWriter::map_embedded_oops(ArchiveStreamedHeapInfo* heap_info) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);

  for (int i = 0; i < _source_objs->length(); i++) {
    oop src_obj = _source_objs->at(i);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());

    update_header_for_buffered_addr(buffered_obj, src_obj, src_obj->klass());

    EmbeddedOopMapper mapper(src_obj, buffered_obj, heap_info->oopmap());
    src_obj->oop_iterate(&mapper);
    HeapShared::remap_dumped_metadata(src_obj, buffered_obj);
  }

  size_t total_bytes = (size_t)_buffer->length();
  log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / oopmap_unit);
}

size_t AOTStreamedHeapWriter::source_obj_to_buffered_offset(oop src_obj) {
  HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
  return p->buffer_offset();
}

address AOTStreamedHeapWriter::source_obj_to_buffered_addr(oop src_obj) {
  return offset_to_buffered_address<address>(source_obj_to_buffered_offset(src_obj));
}

oop AOTStreamedHeapWriter::buffered_offset_to_source_obj(size_t buffered_offset) {
  OopHandle* oh = _buffer_offset_to_source_obj_table->get(buffered_offset);
  if (oh != nullptr) {
    return oh->resolve();
  } else {
    return nullptr;
  }
}

oop AOTStreamedHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
  return buffered_offset_to_source_obj(buffered_address_to_offset(buffered_addr));
}

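// Record the buffer region, the offsets of the root, forwarding, and per-root highest-DFS
// tables, and the object/root counts in the ArchiveStreamedHeapInfo.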
void AOTStreamedHeapWriter::populate_archive_heap_info(ArchiveStreamedHeapInfo* info) {
  assert(!info->is_used(), "only set once");

  size_t heap_region_byte_size = _buffer_used;
  assert(heap_region_byte_size > 0, "must archive at least one object!");

  info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
                                    offset_to_buffered_address<HeapWord*>(_buffer_used)));
  info->set_roots_offset(_roots_offset);
  info->set_num_roots((size_t)HeapShared::pending_roots()->length());
  info->set_forwarding_offset(_forwarding_offset);
  info->set_root_highest_object_index_table_offset(_root_highest_object_index_table_offset);
  info->set_num_archived_objects((size_t)_source_objs->length());
}

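// Returns an iterator used by AOTMapLogger to walk the buffered objects in DFS order and to
// resolve embedded DFS indices back to their buffered objects.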
AOTMapLogger::OopDataIterator* AOTStreamedHeapWriter::oop_iterator(ArchiveStreamedHeapInfo* heap_info) {
  class StreamedWriterOopIterator : public AOTMapLogger::OopDataIterator {
  private:
    int _current;
    int _next;

    address _buffer_start;

    int _num_archived_objects;
    int _num_archived_roots;
    int* _roots;

  public:
    StreamedWriterOopIterator(address buffer_start,
                              int num_archived_objects,
                              int num_archived_roots,
                              int* roots)
      : _current(0),
        _next(1),
        _buffer_start(buffer_start),
        _num_archived_objects(num_archived_objects),
        _num_archived_roots(num_archived_roots),
        _roots(roots) {
    }

    AOTMapLogger::OopData capture(int dfs_index) {
      size_t buffered_offset = _dfs_to_archive_object_table[dfs_index];
      address buffered_addr = _buffer_start + buffered_offset;
      oop src_obj = AOTStreamedHeapWriter::buffered_offset_to_source_obj(buffered_offset);
      assert(src_obj != nullptr, "why is this null?");
      oopDesc* raw_oop = (oopDesc*)buffered_addr;
      Klass* klass = src_obj->klass();
      size_t size = src_obj->size();

      intptr_t target_location = (intptr_t)buffered_offset;
      uint32_t narrow_location = checked_cast<uint32_t>(dfs_index);

      address requested_addr = (address)buffered_offset;

      return { buffered_addr,
               requested_addr,
               target_location,
               narrow_location,
               raw_oop,
               klass,
               size,
               false };
    }

    bool has_next() override {
      return _next <= _num_archived_objects;
    }

    AOTMapLogger::OopData next() override {
      _current = _next;
      AOTMapLogger::OopData result = capture(_current);
      _next = _current + 1;
      return result;
    }

    AOTMapLogger::OopData obj_at(narrowOop* addr) override {
      int dfs_index = (int)(*addr);
      if (dfs_index == 0) {
        return null_data();
      } else {
        return capture(dfs_index);
      }
    }

    AOTMapLogger::OopData obj_at(oop* addr) override {
      int dfs_index = (int)cast_from_oop<uintptr_t>(*addr);
      if (dfs_index == 0) {
        return null_data();
      } else {
        return capture(dfs_index);
      }
    }

    GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
      GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* result = new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();

      for (int i = 0; i < _num_archived_roots; ++i) {
        int object_index = _roots[i];
        result->append(capture(object_index));
      }

      return result;
    }
  };

  MemRegion r = heap_info->buffer_region();
  address buffer_start = address(r.start());

  size_t roots_offset = heap_info->roots_offset();
  int* roots = ((int*)(buffer_start + roots_offset)) + 1;

  return new StreamedWriterOopIterator(buffer_start, (int)heap_info->num_archived_objects(), (int)heap_info->num_roots(), roots);
}

#endif // INCLUDE_CDS_JAVA_HEAP