/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotReferenceObjSupport.hpp"
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* AOTStreamedHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t AOTStreamedHeapWriter::_buffer_used;
size_t AOTStreamedHeapWriter::_roots_offset;
size_t AOTStreamedHeapWriter::_forwarding_offset;
size_t AOTStreamedHeapWriter::_root_highest_object_index_table_offset;

GrowableArrayCHeap<oop, mtClassShared>* AOTStreamedHeapWriter::_source_objs;

AOTStreamedHeapWriter::BufferOffsetToSourceObjectTable* AOTStreamedHeapWriter::_buffer_offset_to_source_obj_table;
AOTStreamedHeapWriter::SourceObjectToDFSOrderTable* AOTStreamedHeapWriter::_dfs_order_table;

int* AOTStreamedHeapWriter::_roots_highest_dfs;
size_t* AOTStreamedHeapWriter::_dfs_to_archive_object_table;

static const int max_table_capacity = 0x3fffffff;

void AOTStreamedHeapWriter::init() {
  if (CDSConfig::is_dumping_heap()) {
    _buffer_offset_to_source_obj_table = new (mtClassShared) BufferOffsetToSourceObjectTable(8, max_table_capacity);

    int initial_source_objs_capacity = 10000;
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(initial_source_objs_capacity);
  }
}

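// _source_objs and _dfs_order_table refer to the source objects via raw oops, so they
// are deleted once the writer no longer needs them.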
void AOTStreamedHeapWriter::delete_tables_with_raw_oops() {
  delete _source_objs;
  _source_objs = nullptr;

  delete _dfs_order_table;
  _dfs_order_table = nullptr;
}

void AOTStreamedHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

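// Pushes every non-null referent of _src_obj onto the DFS stack. For java.lang.ref
// objects, HeapShared::maybe_remap_referent() may substitute a different (or null)
// referent before it is pushed.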
class FollowOopIterateClosure: public BasicOopIterateClosure {
  Stack<oop, mtClassShared>* _dfs_stack;
  oop _src_obj;
  bool _is_java_lang_ref;

public:
  FollowOopIterateClosure(Stack<oop, mtClassShared>* dfs_stack, oop src_obj, bool is_java_lang_ref) :
    _dfs_stack(dfs_stack),
    _src_obj(src_obj),
    _is_java_lang_ref(is_java_lang_ref) {}

  void do_oop(narrowOop *p) { do_oop_work(p); }
  void do_oop(      oop *p) { do_oop_work(p); }

private:
  template <class T> void do_oop_work(T *p) {
    size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
    oop obj = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, HeapAccess<>::oop_load(p));
    if (obj != nullptr) {
      _dfs_stack->push(obj);
    }
  }
};

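// Comparator for sorting _source_objs by the DFS number assigned in order_source_objs().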
int AOTStreamedHeapWriter::cmp_dfs_order(oop* o1, oop* o2) {
  int* o1_dfs = _dfs_order_table->get(*o1);
  int* o2_dfs = _dfs_order_table->get(*o2);
  return *o1_dfs - *o2_dfs;
}

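// Traverses the object graph depth-first from the roots and assigns every reachable
// archived object a 1-based DFS number (0 is reserved for null). For each root, the
// highest DFS number handed out by the time its traversal finished is recorded in
// _roots_highest_dfs. Finally, _source_objs is sorted into DFS order.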
void AOTStreamedHeapWriter::order_source_objs(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  Stack<oop, mtClassShared> dfs_stack;
  _dfs_order_table = new (mtClassShared) SourceObjectToDFSOrderTable(8, max_table_capacity);
  _roots_highest_dfs = NEW_C_HEAP_ARRAY(int, (size_t)roots->length(), mtClassShared);
  _dfs_to_archive_object_table = NEW_C_HEAP_ARRAY(size_t, (size_t)_source_objs->length() + 1, mtClassShared);

  for (int i = 0; i < _source_objs->length(); ++i) {
    oop obj = _source_objs->at(i);
    _dfs_order_table->put(cast_from_oop<void*>(obj), -1);
    _dfs_order_table->maybe_grow();
  }

  int dfs_order = 0;

  for (int i = 0; i < roots->length(); ++i) {
    oop root = roots->at(i);

    if (root == nullptr) {
      log_info(aot, heap)("null root at %d", i);
      continue;
    }

    dfs_stack.push(root);

    while (!dfs_stack.is_empty()) {
      oop obj = dfs_stack.pop();
      assert(obj != nullptr, "null root");
      int* dfs_number = _dfs_order_table->get(cast_from_oop<void*>(obj));
      if (*dfs_number != -1) {
        // Already visited in the traversal
        continue;
      }
      _dfs_order_table->put(cast_from_oop<void*>(obj), ++dfs_order);
      _dfs_order_table->maybe_grow();

      FollowOopIterateClosure cl(&dfs_stack, obj, AOTReferenceObjSupport::check_if_ref_obj(obj));
      obj->oop_iterate(&cl);
    }

    _roots_highest_dfs[i] = dfs_order;
  }

  _source_objs->sort(cmp_dfs_order);
}

void AOTStreamedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                  ArchiveStreamedHeapInfo* heap_info) {
  assert(CDSConfig::is_dumping_heap(), "sanity");
  allocate_buffer();
  order_source_objs(roots);
  copy_source_objs_to_buffer(roots);
  map_embedded_oops(heap_info);
  populate_archive_heap_info(heap_info);
}

void AOTStreamedHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

void AOTStreamedHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

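// Appends the root table to the buffer: an int count followed by one int per root holding
// the root's DFS index (0 for a null root), padded up to HeapWordSize.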
void AOTStreamedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  int length = roots->length();
  size_t byte_size = align_up(sizeof(int) + sizeof(int) * (size_t)length, (size_t)HeapWordSize);

  size_t new_used = _buffer_used + byte_size;
  ensure_buffer_space(new_used);

  int* mem = offset_to_buffered_address<int*>(_buffer_used);
  memset(mem, 0, byte_size);
  *mem = length;

  for (int i = 0; i < length; i++) {
    // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside of the real heap!
    oop o = roots->at(i);
    int dfs_index = o == nullptr ? 0 : *_dfs_order_table->get(cast_from_oop<void*>(o));
    mem[i + 1] = dfs_index;
  }
  log_info(aot, heap)("archived obj roots[%d] = %zu bytes, mem = %p", length, byte_size, mem);

  _roots_offset = _buffer_used;
  _buffer_used = new_used;
}

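// Appends a single raw value of type T to the buffer, growing the buffer if necessary.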
template <typename T>
void AOTStreamedHeapWriter::write(T value) {
  size_t new_used = _buffer_used + sizeof(T);
  ensure_buffer_space(new_used);
  T* mem = offset_to_buffered_address<T*>(_buffer_used);
  *mem = value;
  _buffer_used = new_used;
}

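// Appends the forwarding table: for each DFS index (1-based; entry 0 is the null entry),
// the buffer offset of the corresponding archived object.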
void AOTStreamedHeapWriter::copy_forwarding_to_buffer() {
  _forwarding_offset = _buffer_used;

  write<size_t>(0); // The first entry is the null entry

  // Write a mapping from object index to buffer offset
  for (int i = 1; i <= _source_objs->length(); i++) {
    size_t buffer_offset = _dfs_to_archive_object_table[i];
    write(buffer_offset);
  }
}

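// Appends, for each root, the highest DFS index that had been assigned when the traversal
// from that root completed, padding with -1 so the table ends on a 64-bit boundary.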
void AOTStreamedHeapWriter::copy_roots_max_dfs_to_buffer(int roots_length) {
  _root_highest_object_index_table_offset = _buffer_used;

  for (int i = 0; i < roots_length; ++i) {
    int highest_dfs = _roots_highest_dfs[i];
    write(highest_dfs);
  }

  if ((roots_length % 2) != 0) {
    write(-1); // Align up to a 64 bit word
  }
}

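// Returns true if obj is a String that is currently the canonical entry in the
// StringTable, i.e., interning the same characters yields this exact oop.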
static bool is_interned_string(oop obj) {
  if (!java_lang_String::is_instance(obj)) {
    return false;
  }

  ResourceMark rm;
  int len;
  jchar* name = java_lang_String::as_unicode_string_or_null(obj, len);
  if (name == nullptr) {
    fatal("Insufficient memory for dumping");
  }
  return StringTable::lookup(name, len) == obj;
}

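// The oopmap has one bit per oop slot in the buffer, so the bit index is the buffer
// offset divided by the size of a (possibly compressed) oop.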
static BitMap::idx_t bit_idx_for_buffer_offset(size_t buffer_offset) {
  if (UseCompressedOops) {
    return BitMap::idx_t(buffer_offset / sizeof(narrowOop));
  } else {
    return BitMap::idx_t(buffer_offset / sizeof(HeapWord));
  }
}

bool AOTStreamedHeapWriter::is_dumped_interned_string(oop obj) {
  return is_interned_string(obj) && HeapShared::get_cached_oop_info(obj) != nullptr;
}

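// Copies every source object into the buffer in DFS order, recording each object's buffer
// offset in its CachedOopInfo and in _dfs_to_archive_object_table, then appends the root
// table, the forwarding table, and the per-root highest-DFS table.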
void AOTStreamedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  for (int i = 0; i < _source_objs->length(); i++) {
    oop src_obj = _source_objs->at(i);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);

    OopHandle handle(Universe::vm_global(), src_obj);
    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
    _buffer_offset_to_source_obj_table->maybe_grow();

    int dfs_order = i + 1;
    _dfs_to_archive_object_table[dfs_order] = buffer_offset;
  }

  copy_roots_to_buffer(roots);
  copy_forwarding_to_buffer();
  copy_roots_max_dfs_to_buffer(roots->length());

  log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots",
                _buffer_used, _source_objs->length() + 1, roots->length());
}

template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

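// Instances with a fast layout helper and arrays can have their size recomputed from the
// klass at load time; everything else needs the size stored explicitly in the buffer.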
static bool needs_explicit_size(oop src_obj) {
  Klass* klass = src_obj->klass();
  int lh = klass->layout_helper();

  // Simple instances or arrays don't need explicit size
  if (Klass::layout_helper_is_instance(lh)) {
    return Klass::layout_helper_needs_slow_path(lh);
  }

  return !Klass::layout_helper_is_array(lh);
}

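// Copies one source object into the buffer (preceded by an explicit size word when
// needed), clears native pointers that are restored at runtime, and returns the buffer
// offset of the copy.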
size_t AOTStreamedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  size_t old_size = src_obj->size();
  size_t size = src_obj->copy_size_cds(old_size, src_obj->mark());
  if (needs_explicit_size(src_obj)) {
    // For more complex objects, write the size explicitly so the loader can read it
    // directly instead of having to treat the buffered bytes as real objects just to
    // compute the size. Most of the time, the layout helper of the class is enough.
    write<size_t>(size);
  }
  size_t byte_size = size * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  ensure_buffer_space(new_used);

  if (is_interned_string(src_obj)) {
    java_lang_String::hash_code(src_obj);                   // Sets the hash code field(s)
    java_lang_String::set_deduplication_forbidden(src_obj); // Allows faster interning at runtime
    assert(java_lang_String::hash_is_set(src_obj), "hash must be set");
  }

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, MIN2(size, old_size) * HeapWordSize);

  if (java_lang_Module::is_instance(src_obj)) {
    // These native pointers will be restored explicitly at run time.
    Modules::check_archived_module_oop(src_obj);
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

// Oop mapping

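// Archived oop fields do not hold addresses; they hold the DFS index of the target
// object: a full word when compressed oops are off, a 32-bit value when they are on.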
inline void AOTStreamedHeapWriter::store_oop_in_buffer(oop* buffered_addr, int dfs_index) {
  *(ssize_t*)buffered_addr = dfs_index;
}

inline void AOTStreamedHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, int dfs_index) {
  *(int32_t*)buffered_addr = (int32_t)dfs_index;
}

template <typename T> void AOTStreamedHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  // Mark the pointer in the oopmap
  size_t buffered_offset = buffered_address_to_offset((address)buffered_addr);
  BitMap::idx_t idx = bit_idx_for_buffer_offset(buffered_offset);
  oopmap->set_bit(idx);
}

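// Writes the DFS index of obj (or 0 for null) into the buffered copy of an oop field and
// marks the field's slot in the oopmap.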
template <typename T> void AOTStreamedHeapWriter::map_oop_field_in_buffer(oop obj, T* field_addr_in_buffer, CHeapBitMap* oopmap) {
  if (obj == nullptr) {
    store_oop_in_buffer(field_addr_in_buffer, 0);
  } else {
    int dfs_index = *_dfs_order_table->get(obj);
    store_oop_in_buffer(field_addr_in_buffer, dfs_index);
  }

  mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
}

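// Initializes the header of the buffered copy: the klass field is set to the requested
// (archived) narrowKlass, the identity hash is carried over from the source object, and
// interned strings get the marked bit set so the loader can recognize them.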
void AOTStreamedHeapWriter::update_header_for_buffered_addr(address buffered_addr, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);

  markWord mw = markWord::prototype();
  oopDesc* fake_oop = (oopDesc*)buffered_addr;

  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap. This also has the side effect of pre-initializing the
  // identity_hash for all shared objects, so they are less likely to be written
  // into during run time, increasing the potential of memory sharing.
  if (src_obj != nullptr) {
    if (UseCompactObjectHeaders) {
      mw = mw.copy_hashctrl_from(src_obj->mark());
      if (mw.is_hashed_not_expanded()) {
        mw = fake_oop->initialize_hash_if_necessary(src_obj, src_klass, mw);
      } else if (mw.is_not_hashed_expanded()) {
        // If a scratch mirror class has not been hashed until now, then reset its
        // hash bits to initial state.
        mw = mw.set_not_hashed_not_expanded();
      }
    } else {
      intptr_t src_hash = src_obj->identity_hash();
      mw = mw.copy_set_hash(src_hash);
    }
  }

  if (is_interned_string(src_obj)) {
    // Set the marked bit in the mark word of interned strings so the loader knows to
    // link them into the string table at runtime.
    mw = mw.set_marked();
  }

  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(mw.set_narrow_klass(nk));
  } else {
    fake_oop->set_mark(mw);
    fake_oop->set_narrow_klass(nk);
  }
}

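// Iterates over the oop fields of a source object and rewrites the corresponding fields
// of its buffered copy as DFS indices, marking each rewritten slot in the oopmap.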
class AOTStreamedHeapWriter::EmbeddedOopMapper: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;
  bool _is_java_lang_ref;

public:
  EmbeddedOopMapper(oop src_obj, address buffered_obj, CHeapBitMap* oopmap)
    : _src_obj(src_obj),
      _buffered_obj(buffered_obj),
      _oopmap(oopmap),
      _is_java_lang_ref(AOTReferenceObjSupport::check_if_ref_obj(src_obj)) {}

  void do_oop(narrowOop *p) { EmbeddedOopMapper::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopMapper::do_oop_work(p); }

private:
  template <typename T>
  void do_oop_work(T *p) {
    size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
    oop obj = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, HeapAccess<>::oop_load(p));
    AOTStreamedHeapWriter::map_oop_field_in_buffer<T>(obj, (T*)(_buffered_obj + field_offset), _oopmap);
  }
};

static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
  // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
  size_t start = bitmap->find_first_set_bit(0);
  size_t end = bitmap->size();
  log_info(aot)("%s = %7zu ... %7zu (%3zu%% ... %3zu%% = %3zu%%)", which,
                start, end,
                start * 100 / total_bits,
                end * 100 / total_bits,
                (end - start) * 100 / total_bits);
}

// Update all oop fields embedded in the buffered objects
void AOTStreamedHeapWriter::map_embedded_oops(ArchiveStreamedHeapInfo* heap_info) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);

  for (int i = 0; i < _source_objs->length(); i++) {
    oop src_obj = _source_objs->at(i);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());

    update_header_for_buffered_addr(buffered_obj, src_obj, src_obj->klass());

    EmbeddedOopMapper mapper(src_obj, buffered_obj, heap_info->oopmap());
    src_obj->oop_iterate(&mapper);
    HeapShared::remap_dumped_metadata(src_obj, buffered_obj);
  }

  size_t total_bytes = (size_t)_buffer->length();
  log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / oopmap_unit);
}

size_t AOTStreamedHeapWriter::source_obj_to_buffered_offset(oop src_obj) {
  HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
  return p->buffer_offset();
}

address AOTStreamedHeapWriter::source_obj_to_buffered_addr(oop src_obj) {
  return offset_to_buffered_address<address>(source_obj_to_buffered_offset(src_obj));
}

oop AOTStreamedHeapWriter::buffered_offset_to_source_obj(size_t buffered_offset) {
  OopHandle* oh = _buffer_offset_to_source_obj_table->get(buffered_offset);
  if (oh != nullptr) {
    return oh->resolve();
  } else {
    return nullptr;
  }
}

oop AOTStreamedHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
  return buffered_offset_to_source_obj(buffered_address_to_offset(buffered_addr));
}

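// Records the buffer region, the offsets of the root, forwarding, and per-root
// highest-DFS tables, and the object/root counts in the given ArchiveStreamedHeapInfo.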
void AOTStreamedHeapWriter::populate_archive_heap_info(ArchiveStreamedHeapInfo* info) {
  assert(!info->is_used(), "only set once");

  size_t heap_region_byte_size = _buffer_used;
  assert(heap_region_byte_size > 0, "must archive at least one object!");

  info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
                                    offset_to_buffered_address<HeapWord*>(_buffer_used)));
  info->set_roots_offset(_roots_offset);
  info->set_num_roots((size_t)HeapShared::pending_roots()->length());
  info->set_forwarding_offset(_forwarding_offset);
  info->set_root_highest_object_index_table_offset(_root_highest_object_index_table_offset);
  info->set_num_archived_objects((size_t)_source_objs->length());
}

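// Returns an iterator used by AOTMapLogger to walk the buffered objects. Since oop fields
// in the buffer hold DFS indices rather than addresses, obj_at() translates an index back
// into the corresponding buffered object via _dfs_to_archive_object_table.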
AOTMapLogger::OopDataIterator* AOTStreamedHeapWriter::oop_iterator(ArchiveStreamedHeapInfo* heap_info) {
  class StreamedWriterOopIterator : public AOTMapLogger::OopDataIterator {
  private:
    int _current;
    int _next;

    address _buffer_start;

    int _num_archived_objects;
    int _num_archived_roots;
    int* _roots;

  public:
    StreamedWriterOopIterator(address buffer_start,
                              int num_archived_objects,
                              int num_archived_roots,
                              int* roots)
      : _current(0),
        _next(1),
        _buffer_start(buffer_start),
        _num_archived_objects(num_archived_objects),
        _num_archived_roots(num_archived_roots),
        _roots(roots) {
    }

    AOTMapLogger::OopData capture(int dfs_index) {
      size_t buffered_offset = _dfs_to_archive_object_table[dfs_index];
      address buffered_addr = _buffer_start + buffered_offset;
      oop src_obj = AOTStreamedHeapWriter::buffered_offset_to_source_obj(buffered_offset);
      assert(src_obj != nullptr, "why is this null?");
      oopDesc* raw_oop = (oopDesc*)buffered_addr;
      Klass* klass = src_obj->klass();
      size_t size = src_obj->size();
      size = src_obj->copy_size_cds(size, src_obj->mark());

      intptr_t target_location = (intptr_t)buffered_offset;
      uint32_t narrow_location = checked_cast<uint32_t>(dfs_index);

      address requested_addr = (address)buffered_offset;

      return { buffered_addr,
               requested_addr,
               target_location,
               narrow_location,
               raw_oop,
               klass,
               size,
               false };
    }

    bool has_next() override {
      return _next <= _num_archived_objects;
    }

    AOTMapLogger::OopData next() override {
      _current = _next;
      AOTMapLogger::OopData result = capture(_current);
      _next = _current + 1;
      return result;
    }

    AOTMapLogger::OopData obj_at(narrowOop* addr) override {
      int dfs_index = (int)(*addr);
      if (dfs_index == 0) {
        return null_data();
      } else {
        return capture(dfs_index);
      }
    }

    AOTMapLogger::OopData obj_at(oop* addr) override {
      int dfs_index = (int)cast_from_oop<uintptr_t>(*addr);
      if (dfs_index == 0) {
        return null_data();
      } else {
        return capture(dfs_index);
      }
    }

    GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
      GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* result = new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();

      for (int i = 0; i < _num_archived_roots; ++i) {
        int object_index = _roots[i];
        result->append(capture(object_index));
      }

      return result;
    }
  };

  MemRegion r = heap_info->buffer_region();
  address buffer_start = address(r.start());

  size_t roots_offset = heap_info->roots_offset();
  int* roots = ((int*)(buffer_start + roots_offset)) + 1;

  return new StreamedWriterOopIterator(buffer_start, (int)heap_info->num_archived_objects(), (int)heap_info->num_roots(), roots);
}

#endif // INCLUDE_CDS_JAVA_HEAP