/*
 * Copyright (c) 2025, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotReferenceObjSupport.hpp"
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"

#if INCLUDE_CDS_JAVA_HEAP

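// Informal overview, derived from the code below: the streamed heap writer copies each
// archived object into a growable byte buffer in depth-first (DFS) order. Embedded oop
// fields are rewritten as the DFS index of the referenced object (0 means null). After
// the objects, the buffer holds a roots table, a forwarding table mapping DFS indices to
// buffer offsets, and a table recording the highest DFS index reachable from each root.
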
GrowableArrayCHeap<u1, mtClassShared>* AOTStreamedHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t AOTStreamedHeapWriter::_buffer_used;
size_t AOTStreamedHeapWriter::_roots_offset;
size_t AOTStreamedHeapWriter::_forwarding_offset;
size_t AOTStreamedHeapWriter::_root_highest_object_index_table_offset;

GrowableArrayCHeap<oop, mtClassShared>* AOTStreamedHeapWriter::_source_objs;

AOTStreamedHeapWriter::BufferOffsetToSourceObjectTable* AOTStreamedHeapWriter::_buffer_offset_to_source_obj_table;
AOTStreamedHeapWriter::SourceObjectToDFSOrderTable* AOTStreamedHeapWriter::_dfs_order_table;

int* AOTStreamedHeapWriter::_roots_highest_dfs;
size_t* AOTStreamedHeapWriter::_dfs_to_archive_object_table;

static const int max_table_capacity = 0x3fffffff;

void AOTStreamedHeapWriter::init() {
  if (CDSConfig::is_dumping_heap()) {
    _buffer_offset_to_source_obj_table = new (mtClassShared) BufferOffsetToSourceObjectTable(8, max_table_capacity);

    int initial_source_objs_capacity = 10000;
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(initial_source_objs_capacity);
  }
}

void AOTStreamedHeapWriter::delete_tables_with_raw_oops() {
  delete _source_objs;
  _source_objs = nullptr;

  delete _dfs_order_table;
  _dfs_order_table = nullptr;
}

void AOTStreamedHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

class FollowOopIterateClosure: public BasicOopIterateClosure {
  Stack<oop, mtClassShared>* _dfs_stack;
  oop _src_obj;
  bool _is_java_lang_ref;

public:
  FollowOopIterateClosure(Stack<oop, mtClassShared>* dfs_stack, oop src_obj, bool is_java_lang_ref) :
    _dfs_stack(dfs_stack),
    _src_obj(src_obj),
    _is_java_lang_ref(is_java_lang_ref) {}

  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

private:
  template <class T> void do_oop_work(T *p) {
    size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
    oop obj = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, HeapAccess<>::oop_load(p));
    if (obj != nullptr) {
      _dfs_stack->push(obj);
    }
  }
};

int AOTStreamedHeapWriter::cmp_dfs_order(oop* o1, oop* o2) {
  int* o1_dfs = _dfs_order_table->get(*o1);
  int* o2_dfs = _dfs_order_table->get(*o2);
  return *o1_dfs - *o2_dfs;
}

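// Assign a DFS number to every archived object by traversing the object graph from the
// roots. DFS numbers start at 1; 0 is reserved for null. For each root we also remember
// the highest DFS number assigned while processing it, and _source_objs is finally
// sorted into DFS order so that objects are written to the buffer in traversal order.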
void AOTStreamedHeapWriter::order_source_objs(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  Stack<oop, mtClassShared> dfs_stack;
  _dfs_order_table = new (mtClassShared) SourceObjectToDFSOrderTable(8, max_table_capacity);
  _roots_highest_dfs = NEW_C_HEAP_ARRAY(int, (size_t)roots->length(), mtClassShared);
  _dfs_to_archive_object_table = NEW_C_HEAP_ARRAY(size_t, (size_t)_source_objs->length() + 1, mtClassShared);

  for (int i = 0; i < _source_objs->length(); ++i) {
    oop obj = _source_objs->at(i);
    _dfs_order_table->put(cast_from_oop<void*>(obj), -1);
    _dfs_order_table->maybe_grow();
  }

  int dfs_order = 0;

  for (int i = 0; i < roots->length(); ++i) {
    oop root = roots->at(i);

    if (root == nullptr) {
      log_info(aot, heap)("null root at %d", i);
      continue;
    }

    dfs_stack.push(root);

    while (!dfs_stack.is_empty()) {
      oop obj = dfs_stack.pop();
      assert(obj != nullptr, "null root");
      int* dfs_number = _dfs_order_table->get(cast_from_oop<void*>(obj));
      if (*dfs_number != -1) {
        // Already visited in the traversal
        continue;
      }
      _dfs_order_table->put(cast_from_oop<void*>(obj), ++dfs_order);
      _dfs_order_table->maybe_grow();

      FollowOopIterateClosure cl(&dfs_stack, obj, AOTReferenceObjSupport::check_if_ref_obj(obj));
      obj->oop_iterate(&cl);
    }

    _roots_highest_dfs[i] = dfs_order;
  }

  _source_objs->sort(cmp_dfs_order);
}

void AOTStreamedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                  ArchiveStreamedHeapInfo* heap_info) {
  assert(CDSConfig::is_dumping_heap(), "sanity");
  allocate_buffer();
  order_source_objs(roots);
  copy_source_objs_to_buffer(roots);
  map_embedded_oops(heap_info);
  populate_archive_heap_info(heap_info);
}

void AOTStreamedHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

void AOTStreamedHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // Archived heaps are usually very small; a huge request most likely indicates a bug.
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

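// Append the roots table to the buffer. Roots are recorded as DFS object indices rather
// than oops. A sketch of the layout written below:
//
//   int length;              // number of roots
//   int dfs_index[length];   // DFS index of each root; 0 for a null root
//   ...padding up to HeapWordSize...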
void AOTStreamedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  int length = roots->length();
  size_t byte_size = align_up(sizeof(int) + sizeof(int) * (size_t)length, (size_t)HeapWordSize);

  size_t new_used = _buffer_used + byte_size;
  ensure_buffer_space(new_used);

  int* mem = offset_to_buffered_address<int*>(_buffer_used);
  memset(mem, 0, byte_size);
  *mem = length;

  for (int i = 0; i < length; i++) {
    // Roots are recorded as DFS object indices (0 for null), not as oops.
    oop o = roots->at(i);
    int dfs_index = o == nullptr ? 0 : *_dfs_order_table->get(cast_from_oop<void*>(o));
    mem[i + 1] = dfs_index;
  }
  log_info(aot, heap)("archived obj roots[%d] = %zu bytes, mem = %p", length, byte_size, mem);

  _roots_offset = _buffer_used;
  _buffer_used = new_used;
}

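// Append a single raw value to the end of the buffer, growing it as needed.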
template <typename T>
void AOTStreamedHeapWriter::write(T value) {
  size_t new_used = _buffer_used + sizeof(T);
  ensure_buffer_space(new_used);
  T* mem = offset_to_buffered_address<T*>(_buffer_used);
  *mem = value;
  _buffer_used = new_used;
}

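// Append the forwarding table: for each DFS object index (1.._source_objs->length()) the
// buffer offset of the corresponding archived object. Entry 0 is the null entry.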
void AOTStreamedHeapWriter::copy_forwarding_to_buffer() {
  _forwarding_offset = _buffer_used;

  write<size_t>(0); // The first entry is the null entry

  // Write a mapping from object index to buffer offset
  for (int i = 1; i <= _source_objs->length(); i++) {
    size_t buffer_offset = _dfs_to_archive_object_table[i];
    write(buffer_offset);
  }
}

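// Append, for each root, the highest DFS index reachable from that root. A reader of the
// archive can presumably use this to tell how many objects must be materialized before a
// given root is complete; the exact use is up to the streamed-heap loader, not this file.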
void AOTStreamedHeapWriter::copy_roots_max_dfs_to_buffer(int roots_length) {
  _root_highest_object_index_table_offset = _buffer_used;

  for (int i = 0; i < roots_length; ++i) {
    int highest_dfs = _roots_highest_dfs[i];
    write(highest_dfs);
  }

  if ((roots_length % 2) != 0) {
    write(-1); // Align up to a 64 bit word
  }
}

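// The oopmap has one bit per potential oop-field slot in the buffer: one bit per
// narrowOop when compressed oops are in use, otherwise one bit per HeapWord.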
static BitMap::idx_t bit_idx_for_buffer_offset(size_t buffer_offset) {
  if (UseCompressedOops) {
    return BitMap::idx_t(buffer_offset / sizeof(narrowOop));
  } else {
    return BitMap::idx_t(buffer_offset / sizeof(HeapWord));
  }
}

void AOTStreamedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  for (int i = 0; i < _source_objs->length(); i++) {
    oop src_obj = _source_objs->at(i);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);

    OopHandle handle(Universe::vm_global(), src_obj);
    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
    _buffer_offset_to_source_obj_table->maybe_grow();

    int dfs_order = i + 1;
    _dfs_to_archive_object_table[dfs_order] = buffer_offset;
  }

  copy_roots_to_buffer(roots);
  copy_forwarding_to_buffer();
  copy_roots_max_dfs_to_buffer(roots->length());

  log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots",
                _buffer_used, _source_objs->length() + 1, roots->length());
}

template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

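// An object's size must be written explicitly when it cannot be derived from the klass
// layout helper alone: instances whose layout helper requires the slow path, and objects
// that are neither instances nor arrays.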
static bool needs_explicit_size(oop src_obj) {
  Klass* klass = src_obj->klass();
  int lh = klass->layout_helper();

  // Simple instances or arrays don't need explicit size
  if (Klass::layout_helper_is_instance(lh)) {
    return Klass::layout_helper_needs_slow_path(lh);
  }

  return !Klass::layout_helper_is_array(lh);
}

size_t AOTStreamedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  if (needs_explicit_size(src_obj)) {
    // For more complex objects, write the size explicitly, so the loader does not have
    // to treat the buffered bytes as a real object just to compute its size.
    // Most of the time, the layout helper of the klass is enough.
    write<size_t>(src_obj->size());
  }
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  ensure_buffer_space(new_used);

  if (HeapShared::is_interned_string(src_obj)) {
    java_lang_String::hash_code(src_obj);                   // Sets the hash code field(s)
    java_lang_String::set_deduplication_forbidden(src_obj); // Allows faster interning at runtime
    assert(java_lang_String::hash_is_set(src_obj), "hash must be set");
  }

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, byte_size);

  if (java_lang_Module::is_instance(src_obj)) {
    // These native pointers will be restored explicitly at run time.
    Modules::check_archived_module_oop(src_obj);
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

// Oop mapping

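// Oop fields in buffered objects do not hold addresses; they hold the DFS index of the
// referenced object (0 for null). The oopmap records which slots contain such indices so
// they can be located and resolved when the archive is loaded.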
inline void AOTStreamedHeapWriter::store_oop_in_buffer(oop* buffered_addr, int dfs_index) {
  *(ssize_t*)buffered_addr = dfs_index;
}

inline void AOTStreamedHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, int dfs_index) {
  *(int32_t*)buffered_addr = (int32_t)dfs_index;
}

template <typename T> void AOTStreamedHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  // Mark the pointer in the oopmap
  size_t buffered_offset = buffered_address_to_offset((address)buffered_addr);
  BitMap::idx_t idx = bit_idx_for_buffer_offset(buffered_offset);
  oopmap->set_bit(idx);
}

template <typename T> void AOTStreamedHeapWriter::map_oop_field_in_buffer(oop obj, T* field_addr_in_buffer, CHeapBitMap* oopmap) {
  if (obj == nullptr) {
    store_oop_in_buffer(field_addr_in_buffer, 0);
  } else {
    int dfs_index = *_dfs_order_table->get(obj);
    store_oop_in_buffer(field_addr_in_buffer, dfs_index);
  }

  mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
}

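// Synthesize the header of a buffered object: the identity hash of the source object is
// preserved in the mark word, interned strings are flagged with the marked bit, and the
// narrow klass goes either into the mark word (compact headers) or the klass field.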
void AOTStreamedHeapWriter::update_header_for_buffered_addr(address buffered_addr, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);

  markWord mw = markWord::prototype();
  oopDesc* fake_oop = (oopDesc*)buffered_addr;

  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap. This also has the side effect of pre-initializing the
  // identity_hash for all shared objects, so they are less likely to be written
  // into during run time, increasing the potential of memory sharing.
  if (src_obj != nullptr) {
    intptr_t src_hash = src_obj->identity_hash();
    mw = mw.copy_set_hash(src_hash);
  }

  if (HeapShared::is_interned_string(src_obj)) {
    // Set the marked bit in the mark word of interned strings so the loader knows to
    // link them into the string table at runtime.
    mw = mw.set_marked();
  }

  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(mw.set_narrow_klass(nk));
  } else {
    fake_oop->set_mark(mw);
    fake_oop->set_narrow_klass(nk);
  }
}

class AOTStreamedHeapWriter::EmbeddedOopMapper: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;
  bool _is_java_lang_ref;

public:
  EmbeddedOopMapper(oop src_obj, address buffered_obj, CHeapBitMap* oopmap)
    : _src_obj(src_obj),
      _buffered_obj(buffered_obj),
      _oopmap(oopmap),
      _is_java_lang_ref(AOTReferenceObjSupport::check_if_ref_obj(src_obj)) {}

  void do_oop(narrowOop *p) { EmbeddedOopMapper::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopMapper::do_oop_work(p); }

private:
  template <typename T>
  void do_oop_work(T *p) {
    size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
    oop obj = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, HeapAccess<>::oop_load(p));
    AOTStreamedHeapWriter::map_oop_field_in_buffer<T>(obj, (T*)(_buffered_obj + field_offset), _oopmap);
  }
};

static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
  // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
  size_t start = bitmap->find_first_set_bit(0);
  size_t end = bitmap->size();
  log_info(aot)("%s = %7zu ... %7zu (%3zu%% ... %3zu%% = %3zu%%)", which,
                start, end,
                start * 100 / total_bits,
                end * 100 / total_bits,
                (end - start) * 100 / total_bits);
}

// Update all oop fields embedded in the buffered objects
void AOTStreamedHeapWriter::map_embedded_oops(ArchiveStreamedHeapInfo* heap_info) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);

  for (int i = 0; i < _source_objs->length(); i++) {
    oop src_obj = _source_objs->at(i);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());

    update_header_for_buffered_addr(buffered_obj, src_obj, src_obj->klass());

    EmbeddedOopMapper mapper(src_obj, buffered_obj, heap_info->oopmap());
    src_obj->oop_iterate(&mapper);
    HeapShared::remap_dumped_metadata(src_obj, buffered_obj);
  }

  size_t total_bytes = (size_t)_buffer->length();
  log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / oopmap_unit);
}

459 
460 size_t AOTStreamedHeapWriter::source_obj_to_buffered_offset(oop src_obj) {
461   HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
462   return p->buffer_offset();
463 }
464 
465 address AOTStreamedHeapWriter::source_obj_to_buffered_addr(oop src_obj) {
466   return offset_to_buffered_address<address>(source_obj_to_buffered_offset(src_obj));
467 }
468 
469 oop AOTStreamedHeapWriter::buffered_offset_to_source_obj(size_t buffered_offset) {
470   OopHandle* oh = _buffer_offset_to_source_obj_table->get(buffered_offset);
471   if (oh != nullptr) {
472     return oh->resolve();
473   } else {
474     return nullptr;
475   }
476 }
477 
478 oop AOTStreamedHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
479   return buffered_offset_to_source_obj(buffered_address_to_offset(buffered_addr));
480 }
481 
void AOTStreamedHeapWriter::populate_archive_heap_info(ArchiveStreamedHeapInfo* info) {
  assert(!info->is_used(), "only set once");

  size_t heap_region_byte_size = _buffer_used;
  assert(heap_region_byte_size > 0, "must archive at least one object!");

  info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
                                    offset_to_buffered_address<HeapWord*>(_buffer_used)));
  info->set_roots_offset(_roots_offset);
  info->set_num_roots((size_t)HeapShared::pending_roots()->length());
  info->set_forwarding_offset(_forwarding_offset);
  info->set_root_highest_object_index_table_offset(_root_highest_object_index_table_offset);
  info->set_num_archived_objects((size_t)_source_objs->length());
}

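// Build an iterator that AOTMapLogger uses to walk the archived objects. In this
// writer's encoding, an object's "requested address" is its buffer offset and its
// "narrow location" is its DFS index.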
AOTMapLogger::OopDataIterator* AOTStreamedHeapWriter::oop_iterator(ArchiveStreamedHeapInfo* heap_info) {
  class StreamedWriterOopIterator : public AOTMapLogger::OopDataIterator {
  private:
    int _current;
    int _next;

    address _buffer_start;

    int _num_archived_objects;
    int _num_archived_roots;
    int* _roots;

  public:
    StreamedWriterOopIterator(address buffer_start,
                              int num_archived_objects,
                              int num_archived_roots,
                              int* roots)
      : _current(0),
        _next(1),
        _buffer_start(buffer_start),
        _num_archived_objects(num_archived_objects),
        _num_archived_roots(num_archived_roots),
        _roots(roots) {
    }

    AOTMapLogger::OopData capture(int dfs_index) {
      size_t buffered_offset = _dfs_to_archive_object_table[dfs_index];
      address buffered_addr = _buffer_start + buffered_offset;
      oop src_obj = AOTStreamedHeapWriter::buffered_offset_to_source_obj(buffered_offset);
      assert(src_obj != nullptr, "why is this null?");
      oopDesc* raw_oop = (oopDesc*)buffered_addr;
      Klass* klass = src_obj->klass();
      size_t size = src_obj->size();

      intptr_t target_location = (intptr_t)buffered_offset;
      uint32_t narrow_location = checked_cast<uint32_t>(dfs_index);

      address requested_addr = (address)buffered_offset;

      return { buffered_addr,
               requested_addr,
               target_location,
               narrow_location,
               raw_oop,
               klass,
               size,
               false };
    }

    bool has_next() override {
      return _next <= _num_archived_objects;
    }

    AOTMapLogger::OopData next() override {
      _current = _next;
      AOTMapLogger::OopData result = capture(_current);
      _next = _current + 1;
      return result;
    }

    AOTMapLogger::OopData obj_at(narrowOop* addr) override {
      int dfs_index = (int)(*addr);
      if (dfs_index == 0) {
        return null_data();
      } else {
        return capture(dfs_index);
      }
    }

    AOTMapLogger::OopData obj_at(oop* addr) override {
      int dfs_index = (int)cast_from_oop<uintptr_t>(*addr);
      if (dfs_index == 0) {
        return null_data();
      } else {
        return capture(dfs_index);
      }
    }

    GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
      GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* result = new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();

      for (int i = 0; i < _num_archived_roots; ++i) {
        int object_index = _roots[i];
        result->append(capture(object_index));
      }

      return result;
    }
  };

  MemRegion r = heap_info->buffer_region();
  address buffer_start = address(r.start());

  size_t roots_offset = heap_info->roots_offset();
  int* roots = ((int*)(buffer_start + roots_offset)) + 1;

  return new StreamedWriterOopIterator(buffer_start, (int)heap_info->num_archived_objects(), (int)heap_info->num_roots(), roots);
}

#endif // INCLUDE_CDS_JAVA_HEAP