1 /*
2 * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotMetaspace.hpp"
26 #include "cds/aotStreamedHeapLoader.hpp"
27 #include "cds/aotThread.hpp"
28 #include "cds/cdsConfig.hpp"
29 #include "cds/filemap.hpp"
30 #include "cds/heapShared.inline.hpp"
31 #include "classfile/classLoaderDataShared.hpp"
32 #include "classfile/javaClasses.inline.hpp"
33 #include "classfile/stringTable.hpp"
34 #include "classfile/vmClasses.hpp"
35 #include "gc/shared/collectedHeap.inline.hpp"
36 #include "gc/shared/oopStorage.inline.hpp"
37 #include "gc/shared/oopStorageSet.inline.hpp"
38 #include "logging/log.hpp"
39 #include "memory/iterator.inline.hpp"
40 #include "memory/oopFactory.hpp"
41 #include "oops/access.inline.hpp"
42 #include "oops/objArrayOop.inline.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "oops/oopCast.inline.hpp"
45 #include "runtime/globals.hpp"
46 #include "runtime/globals_extension.hpp"
47 #include "runtime/handles.inline.hpp"
48 #include "runtime/java.hpp"
49 #include "runtime/mutex.hpp"
50 #include "runtime/thread.hpp"
51 #include "utilities/bitMap.inline.hpp"
52 #include "utilities/exceptions.hpp"
53 #include "utilities/globalDefinitions.hpp"
54 #include "utilities/stack.inline.hpp"
55 #include "utilities/ticks.hpp"
56
57 #include <type_traits>
58
59 #if INCLUDE_CDS_JAVA_HEAP
60
// Mapped archive regions: the streamed heap object payload and its auxiliary bitmaps.
FileMapRegion* AOTStreamedHeapLoader::_heap_region;
FileMapRegion* AOTStreamedHeapLoader::_bitmap_region;
// Archived root table, mapping root index -> object index.
int* AOTStreamedHeapLoader::_roots_archive;
// Handle to the runtime roots array (an objArrayOop; see install_root()).
OopHandle AOTStreamedHeapLoader::_roots;
// Marks which reference-sized payload slots of the archived objects hold object indices.
BitMapView AOTStreamedHeapLoader::_oopmap;
bool AOTStreamedHeapLoader::_is_in_use;
// Objects with index <= _previous_batch_last_object_index are fully materialized;
// objects with index <= _current_batch_last_object_index are covered by the batch
// the iterative loader is currently working on.
int AOTStreamedHeapLoader::_previous_batch_last_object_index;
int AOTStreamedHeapLoader::_current_batch_last_object_index;
// Next root index the iterative loader will process.
int AOTStreamedHeapLoader::_current_root_index;
// Words allocated by materialization so far; used to bound pre-GC work.
size_t AOTStreamedHeapLoader::_allocated_words;
// Whether GC may run; this changes how object payloads may be copied and linked.
bool AOTStreamedHeapLoader::_allow_gc;
// When true, entries in _object_index_to_heap_object_table are OopStorage handles
// (oop*) rather than raw oops.
bool AOTStreamedHeapLoader::_objects_are_handles;
size_t AOTStreamedHeapLoader::_num_archived_objects;
int AOTStreamedHeapLoader::_num_roots;
size_t AOTStreamedHeapLoader::_heap_region_used;
bool AOTStreamedHeapLoader::_loading_all_objects;

// Per-object tables indexed by object index (entry 0 acts as the null object;
// see the "Skip the null entry" handling in cleanup()).
size_t* AOTStreamedHeapLoader::_object_index_to_buffer_offset_table;
void** AOTStreamedHeapLoader::_object_index_to_heap_object_table;
// Per-root table: highest object index in the root's transitive closure.
int* AOTStreamedHeapLoader::_root_highest_object_index_table;

// Set while a tracing (lazy) loader waits for the iterative loader's current batch.
bool AOTStreamedHeapLoader::_waiting_for_iterator;
// Set while the roots array representation is being swapped; iteration must wait.
bool AOTStreamedHeapLoader::_swapping_root_format;

// Timing statistics for the different materialization phases.
static uint64_t _early_materialization_time_ns = 0;
static uint64_t _late_materialization_time_ns = 0;
static uint64_t _final_materialization_time_ns = 0;
static uint64_t _cleanup_materialization_time_ns = 0;
static volatile uint64_t _accumulated_lazy_materialization_time_ns = 0;
static Ticks _materialization_start_ticks;
91
// Returns the object index of the given root, as recorded in the archived root table.
int AOTStreamedHeapLoader::object_index_for_root_index(int root_index) {
  return _roots_archive[root_index];
}
95
// Returns the highest object index in the transitive closure of the given root;
// materializing all objects up to this index covers the whole root subgraph.
int AOTStreamedHeapLoader::highest_object_index_for_root_index(int root_index) {
  return _root_highest_object_index_table[root_index];
}
99
// Returns the byte offset of the archived object within the mapped heap region.
size_t AOTStreamedHeapLoader::buffer_offset_for_object_index(int object_index) {
  return _object_index_to_buffer_offset_table[object_index];
}
103
104 oopDesc* AOTStreamedHeapLoader::archive_object_for_object_index(int object_index) {
105 size_t buffer_offset = buffer_offset_for_object_index(object_index);
106 address bottom = (address)_heap_region->mapped_base();
107 return (oopDesc*)(bottom + buffer_offset);
108 }
109
110 size_t AOTStreamedHeapLoader::buffer_offset_for_archive_object(oopDesc* archive_object) {
111 address bottom = (address)_heap_region->mapped_base();
112 return size_t(archive_object) - size_t(bottom);
113 }
114
115 template <bool use_coops>
116 BitMap::idx_t AOTStreamedHeapLoader::obj_bit_idx_for_buffer_offset(size_t buffer_offset) {
117 if constexpr (use_coops) {
118 return BitMap::idx_t(buffer_offset / sizeof(narrowOop));
119 } else {
120 return BitMap::idx_t(buffer_offset / sizeof(HeapWord));
121 }
122 }
123
// Returns the materialized heap object for the given object index, or null if
// it has not been materialized yet. Object index 0 resolves to null (see the
// index-0 special case asserted in copy_object_eager_linking).
oop AOTStreamedHeapLoader::heap_object_for_object_index(int object_index) {
  assert(object_index >= 0 && object_index <= (int)_num_archived_objects,
         "Heap object reference out of index: %d", object_index);

  if (_objects_are_handles) {
    // Table entries have been upgraded to OopStorage handles; load through the
    // handle with a GC-aware access.
    oop* handle = (oop*)_object_index_to_heap_object_table[object_index];
    if (handle == nullptr) {
      return nullptr;
    }
    return NativeAccess<>::oop_load(handle);
  } else {
    // Before the handle upgrade, table entries are raw oops.
    return cast_to_oop(_object_index_to_heap_object_table[object_index]);
  }
}
138
// Records the heap object materialized for the given object index. Depending on
// _objects_are_handles, the object is stored either behind a freshly allocated
// OopStorage handle or as a raw oop. May only be called once per index.
void AOTStreamedHeapLoader::set_heap_object_for_object_index(int object_index, oop heap_object) {
  assert(heap_object_for_object_index(object_index) == nullptr, "Should only set once with this API");
  if (_objects_are_handles) {
    oop* handle = Universe::vm_global()->allocate();
    NativeAccess<>::oop_store(handle, heap_object);
    _object_index_to_heap_object_table[object_index] = (void*)handle;
  } else {
    _object_index_to_heap_object_table[object_index] = cast_from_oop<void*>(heap_object);
  }
}
149
// Returns the object index stored in the value field of an archived
// java.lang.String. In the archive, reference fields hold object indices rather
// than oops: 32-bit with compressed oops, 64-bit (truncated to int) otherwise.
int AOTStreamedHeapLoader::archived_string_value_object_index(oopDesc* archive_object) {
  assert(archive_object->klass() == vmClasses::String_klass(), "Must be an archived string");
  address archive_string_value_addr = (address)archive_object + java_lang_String::value_offset();
  return UseCompressedOops ? *(int*)archive_string_value_addr : (int)*(int64_t*)archive_string_value_addr;
}
155
156 static int archive_array_length(oopDesc* archive_array) {
157 return *(int*)(address(archive_array) + arrayOopDesc::length_offset_in_bytes());
158 }
159
// Computes the size in heap words of an archived object without materializing
// it. Fast paths decode the size from the klass layout helper; for instance
// layouts that need the slow path, and for "other" layouts, the size is read
// from the word immediately preceding the object (precomputed in the archive).
static size_t archive_object_size(oopDesc* archive_object) {
  Klass* klass = archive_object->klass();
  int lh = klass->layout_helper();

  if (Klass::layout_helper_is_instance(lh)) {
    // Instance
    if (Klass::layout_helper_needs_slow_path(lh)) {
      // Size precomputed in the word before the object.
      return ((size_t*)(archive_object))[-1];
    } else {
      // Fixed-size instance: byte size is encoded directly in the layout helper.
      return (size_t)Klass::layout_helper_size_in_bytes(lh) >> LogHeapWordSize;
    }
  } else if (Klass::layout_helper_is_array(lh)) {
    // Array: header plus length * element size, rounded up to the minimum
    // object alignment.
    size_t size_in_bytes;
    size_t array_length = (size_t)archive_array_length(archive_object);
    size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
    size_in_bytes += (size_t)Klass::layout_helper_header_size(lh);

    return align_up(size_in_bytes, (size_t)MinObjAlignmentInBytes) / HeapWordSize;
  } else {
    // Other layouts: size precomputed in the word before the object.
    return ((size_t*)(archive_object))[-1];
  }
}
184
// Allocates a heap object matching the archived object's klass, size and (for
// arrays) length, and installs the given markWord. The payload is filled in
// later by the copy/linking code. Propagates allocation failures via TRAPS.
oop AOTStreamedHeapLoader::allocate_object(oopDesc* archive_object, markWord mark, size_t size, TRAPS) {
  assert(!archive_object->is_stackChunk(), "no such objects are archived");

  // Suppress JVMTI allocation events while materializing archived objects.
  NoJvmtiEventsMark njem;
  oop heap_object;

  Klass* klass = archive_object->klass();
  if (klass->is_mirror_instance_klass()) {
    heap_object = Universe::heap()->class_allocate(klass, size, CHECK_NULL);
  } else if (klass->is_instance_klass()) {
    heap_object = Universe::heap()->obj_allocate(klass, size, CHECK_NULL);
  } else {
    assert(klass->is_array_klass(), "must be");
    int length = archive_array_length(archive_object);
    // Only object arrays are zeroed, so their reference slots start out null;
    // primitive array payloads are presumably overwritten wholesale by the copy.
    bool do_zero = klass->is_objArray_klass();
    heap_object = Universe::heap()->array_allocate(klass, size, length, do_zero, CHECK_NULL);
  }

  heap_object->set_mark(mark);

  return heap_object;
}
207
// Publishes a materialized root object into the runtime roots array.
void AOTStreamedHeapLoader::install_root(int root_index, oop heap_object) {
  objArrayOop roots = objArrayOop(_roots.resolve());
  OrderAccess::release(); // Once the store below publishes an object, it can be concurrently picked up by another thread without using the lock
  roots->obj_at_put(root_index, heap_object);
}
213
// Blocks on AOTHeapLoading_lock until the iterative loader makes progress.
// Safepoint checking is used if and only if the current thread is bootstrapped
// far enough to participate in safepoints.
void AOTStreamedHeapLoader::TracingObjectLoader::wait_for_iterator() {
  if (JavaThread::current()->is_active_Java_thread()) {
    // When the main thread has bootstrapped past the point of allowing safepoints,
    // we can and indeed have to use safepoint checking waiting.
    AOTHeapLoading_lock->wait();
  } else {
    // If we have not bootstrapped the main thread far enough, then we cannot and
    // indeed also don't need to perform safepoint checking waiting.
    AOTHeapLoading_lock->wait_without_safepoint_check();
  }
}
225
226 // Link object after copying in-place
// Link object after copying in-place. The payload was copied verbatim from the
// archive, so each reference slot still holds an object index rather than a
// real (narrow)oop. This closure visits every reference slot, reads the index
// back out of the raw slot bits, asks the linker for the corresponding heap
// object, and stores it with an oop store. Only used before GC is allowed
// (see copy_object_impl), since a concurrent collector must never observe the
// transient object indices.
template <typename LinkerT>
class AOTStreamedHeapLoader::InPlaceLinkingOopClosure : public BasicOopIterateClosure {
private:
  oop _obj;        // The heap object whose reference fields are being linked
  LinkerT _linker; // Maps (field offset, object index) -> heap object, or null for "don't link now"

public:
  InPlaceLinkingOopClosure(oop obj, LinkerT linker)
    : _obj(obj),
      _linker(linker) {
  }

  // The raw slot contents are an object index, not an oop: reinterpret the bits
  // as an integer of the slot's width.
  virtual void do_oop(oop* p) { do_oop_work(p, (int)*(intptr_t*)p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p, *(int*)p); }

  template <typename T>
  void do_oop_work(T* p, int object_index) {
    int p_offset = pointer_delta_as_int((address)p, cast_from_oop<address>(_obj));
    oop pointee = _linker(p_offset, object_index);
    if (pointee != nullptr) {
      _obj->obj_field_put_access<IS_DEST_UNINITIALIZED>((int)p_offset, pointee);
    }
  }
};
251
// Copies the payload slots in [start_bit, end_bit) of an archived object into
// the corresponding slots of its heap object, linking references as it goes.
// Runs of primitive slots (oopmap bits clear) are bulk memcpy'd; each reference
// slot (oopmap bit set) is resolved through the linker and written with a
// proper oop store. header_bit is the bitmap index of the object's start and
// converts bit indices into payload element offsets. This careful protocol is
// required once GC may run concurrently — see the comment in the loop body.
template <bool use_coops, typename LinkerT>
void AOTStreamedHeapLoader::copy_payload_carefully(oopDesc* archive_object,
                                                   oop heap_object,
                                                   BitMap::idx_t header_bit,
                                                   BitMap::idx_t start_bit,
                                                   BitMap::idx_t end_bit,
                                                   LinkerT linker) {
  using RawElementT = std::conditional_t<use_coops, int32_t, int64_t>;
  using OopElementT = std::conditional_t<use_coops, narrowOop, oop>;

  BitMap::idx_t unfinished_bit = start_bit;
  BitMap::idx_t next_reference_bit = _oopmap.find_first_set_bit(unfinished_bit, end_bit);

  // Fill in heap object bytes
  while (unfinished_bit < end_bit) {
    assert(unfinished_bit >= start_bit && unfinished_bit < end_bit, "out of bounds copying");

    // This is the address of the pointee inside the input stream
    size_t payload_offset = unfinished_bit - header_bit;
    RawElementT* archive_payload_addr = ((RawElementT*)archive_object) + payload_offset;
    RawElementT* heap_payload_addr = cast_from_oop<RawElementT*>(heap_object) + payload_offset;

    assert(heap_payload_addr >= cast_from_oop<RawElementT*>(heap_object) &&
           (HeapWord*)heap_payload_addr < cast_from_oop<HeapWord*>(heap_object) + heap_object->size(),
           "Out of bounds copying");

    if (next_reference_bit > unfinished_bit) {
      // Primitive bytes available
      size_t primitive_elements = next_reference_bit - unfinished_bit;
      size_t primitive_bytes = primitive_elements * sizeof(RawElementT);
      ::memcpy(heap_payload_addr, archive_payload_addr, primitive_bytes);

      unfinished_bit = next_reference_bit;
    } else {
      // Encountered reference
      RawElementT* archive_p = (RawElementT*)archive_payload_addr;
      OopElementT* heap_p = (OopElementT*)heap_payload_addr;
      int pointee_object_index = (int)*archive_p;
      int heap_p_offset = pointer_delta_as_int((address)heap_p, cast_from_oop<address>(heap_object));

      // The object index is retrieved from the archive, not the heap object. This is
      // important after GC is enabled. Concurrent GC threads may scan references in the
      // heap for various reasons after this point. Therefore, it is not okay to first copy
      // the object index from a reference location in the archived object payload to a
      // corresponding location in the heap object payload, and then fix it up afterwards to
      // refer to a heap object. This is why this code iterates carefully over object references
      // in the archived object, linking them one by one, without clobbering the reference
      // locations in the heap objects with anything other than transitions from null to the
      // intended linked object.
      oop obj = linker(heap_p_offset, pointee_object_index);
      if (obj != nullptr) {
        heap_object->obj_field_put(heap_p_offset, obj);
      }

      unfinished_bit++;
      next_reference_bit = _oopmap.find_first_set_bit(unfinished_bit, end_bit);
    }
  }
}
311
// Copies an archived object's payload into its allocated heap object and links
// its reference and Metadata* fields. Before GC is allowed, this is a plain
// bulk copy followed by in-place fixups; once GC may run, the copy proceeds
// carefully so that reference and metadata slots only ever transition from
// zero/null to their final linked values.
template <bool use_coops, typename LinkerT>
void AOTStreamedHeapLoader::copy_object_impl(oopDesc* archive_object,
                                             oop heap_object,
                                             size_t size,
                                             LinkerT linker) {
  if (!_allow_gc) {
    // Without concurrent GC running, we can copy incorrect object references
    // and metadata references into the heap object and then fix them up in-place.
    size_t payload_size = size - 1;
    HeapWord* archive_start = ((HeapWord*)archive_object) + 1;
    HeapWord* heap_start = cast_from_oop<HeapWord*>(heap_object) + 1;

    Copy::disjoint_words(archive_start, heap_start, payload_size);

    // In-place linking fixes up object indices from references of the heap object,
    // and patches them up to refer to objects. This can be done because we just copied
    // the payload of the object from the archive to the heap object, including the
    // reference object indices. However, this is only okay to do before the GC can run.
    // A concurrent GC thread might racingly read the object payload after GC is enabled.
    InPlaceLinkingOopClosure cl(heap_object, linker);
    heap_object->oop_iterate(&cl);
    HeapShared::remap_loaded_metadata(heap_object);
    return;
  }

  // When a concurrent GC may be running, we take care not to copy incorrect oops,
  // narrowOops or Metadata* into the heap objects. Transitions go from 0 to the
  // intended runtime linked values only.
  size_t word_scale = use_coops ? 2 : 1;
  using RawElementT = std::conditional_t<use_coops, int32_t, int64_t>;

  // Skip the markWord; it is set at allocation time
  size_t header_size = word_scale;

  size_t buffer_offset = buffer_offset_for_archive_object(archive_object);
  const BitMap::idx_t header_bit = obj_bit_idx_for_buffer_offset<use_coops>(buffer_offset);
  const BitMap::idx_t start_bit = header_bit + header_size;
  const BitMap::idx_t end_bit = header_bit + size * word_scale;

  BitMap::idx_t curr_bit = start_bit;

  // We are a bit paranoid about GC or other safepointing operations observing
  // shady metadata fields from the archive that do not point at real metadata.
  // We deal with this by explicitly reading the requested address from the
  // archive and fixing it to real Metadata before writing it into the heap object.
  HeapShared::do_metadata_offsets(heap_object, [&](int metadata_offset) {
    BitMap::idx_t metadata_field_idx = header_bit + (size_t)metadata_offset / sizeof(RawElementT);
    BitMap::idx_t skip = word_scale;
    assert(metadata_field_idx >= start_bit && metadata_field_idx + skip <= end_bit,
           "Metadata field out of bounds");

    // Copy payload before metadata field
    copy_payload_carefully<use_coops>(archive_object,
                                      heap_object,
                                      header_bit,
                                      curr_bit,
                                      metadata_field_idx,
                                      linker);

    // Copy metadata field: archived Metadata* are relocated by the archive's
    // relocation delta before being stored.
    Metadata* const archive_metadata = *(Metadata**)(uintptr_t(archive_object) + (size_t)metadata_offset);
    Metadata* const runtime_metadata = archive_metadata != nullptr
        ? (Metadata*)(address(archive_metadata) + AOTMetaspace::relocation_delta())
        : nullptr;
    assert(runtime_metadata == nullptr || AOTMetaspace::in_aot_cache(runtime_metadata), "Invalid metadata pointer");
    DEBUG_ONLY(Metadata* const previous_metadata = heap_object->metadata_field(metadata_offset);)
    assert(previous_metadata == nullptr || previous_metadata == runtime_metadata, "Should not observe transient values");
    heap_object->metadata_field_put(metadata_offset, runtime_metadata);
    curr_bit = metadata_field_idx + skip;
  });

  // Copy the trailing payload after the last metadata field. This is usually
  // doing all the copying.
  copy_payload_carefully<use_coops>(archive_object,
                                    heap_object,
                                    header_bit,
                                    curr_bit,
                                    end_bit,
                                    linker);
}
392
393 void AOTStreamedHeapLoader::copy_object_eager_linking(oopDesc* archive_object, oop heap_object, size_t size) {
394 auto linker = [&](int p_offset, int pointee_object_index) {
395 oop obj = AOTStreamedHeapLoader::heap_object_for_object_index(pointee_object_index);
396 assert(pointee_object_index == 0 || obj != nullptr, "Eager object loading should only encounter already allocated links");
397 return obj;
398 };
399 if (UseCompressedOops) {
400 copy_object_impl<true>(archive_object, heap_object, size, linker);
401 } else {
402 copy_object_impl<false>(archive_object, heap_object, size, linker);
403 }
404 }
405
// Copies an archived object's payload, deferring reference linking: each
// (pointee index, base index, field offset) triple is pushed on the DFS stack
// to be resolved later by drain_stack(), and the field itself is left null.
void AOTStreamedHeapLoader::TracingObjectLoader::copy_object_lazy_linking(int object_index,
                                                                          oopDesc* archive_object,
                                                                          oop heap_object,
                                                                          size_t size,
                                                                          Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack) {
  auto linker = [&](int p_offset, int pointee_object_index) {
    dfs_stack.push({pointee_object_index, object_index, p_offset});

    // The tracing linker is a bit lazy and mutates the reference fields in its traversal.
    // Returning null means don't link now.
    return oop(nullptr);
  };
  if (UseCompressedOops) {
    copy_object_impl<true>(archive_object, heap_object, size, linker);
  } else {
    copy_object_impl<false>(archive_object, heap_object, size, linker);
  }
}
424
// Allocates and fills in the heap object for object_index, pushing unresolved
// references onto the DFS stack. Interned strings are special-cased: their
// value array is materialized eagerly, they are linked eagerly, and the string
// is routed through the StringTable so all users see the canonical instance.
oop AOTStreamedHeapLoader::TracingObjectLoader::materialize_object_inner(int object_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
  // Allocate object
  oopDesc* archive_object = archive_object_for_object_index(object_index);
  size_t size = archive_object_size(archive_object);
  markWord mark = archive_object->mark();

  // The markWord is marked if the object is a String and it should be interned,
  // make sure to unmark it before allocating memory for the object.
  bool string_intern = mark.is_marked();
  mark = mark.set_unmarked();

  oop heap_object;

  if (string_intern) {
    int value_object_index = archived_string_value_object_index(archive_object);

    // Materialize the value object.
    (void)materialize_object(value_object_index, dfs_stack, CHECK_NULL);

    // Allocate and link the string.
    heap_object = allocate_object(archive_object, mark, size, CHECK_NULL);
    copy_object_eager_linking(archive_object, heap_object, size);

    assert(java_lang_String::value(heap_object) == heap_object_for_object_index(value_object_index), "Linker should have linked this correctly");

    // Replace the string with interned string
    heap_object = StringTable::intern(heap_object, CHECK_NULL);
  } else {
    heap_object = allocate_object(archive_object, mark, size, CHECK_NULL);

    // Fill in object contents
    copy_object_lazy_linking(object_index, archive_object, heap_object, size, dfs_stack);
  }

  // Install forwarding
  set_heap_object_for_object_index(object_index, heap_object);

  return heap_object;
}
464
465 oop AOTStreamedHeapLoader::TracingObjectLoader::materialize_object(int object_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
466 if (object_index <= _previous_batch_last_object_index) {
467 // The transitive closure of this object has been materialized; no need to do anything
468 return heap_object_for_object_index(object_index);
469 }
470
471 if (object_index <= _current_batch_last_object_index) {
472 // The AOTThread is currently materializing this object and its transitive closure; only need to wait for it to complete
473 _waiting_for_iterator = true;
474 while (object_index > _previous_batch_last_object_index) {
475 wait_for_iterator();
476 }
477 _waiting_for_iterator = false;
478
479 // Notify the AOT thread if it is waiting for tracing to finish
480 AOTHeapLoading_lock->notify_all();
481 return heap_object_for_object_index(object_index);;
482 }
483
484 oop heap_object = heap_object_for_object_index(object_index);
485 if (heap_object != nullptr) {
486 // Already materialized by mutator
487 return heap_object;
488 }
489
490 return materialize_object_inner(object_index, dfs_stack, THREAD);
491 }
492
// Resolves the deferred reference links pushed by copy_object_lazy_linking:
// each entry materializes its pointee (possibly pushing more entries) and then
// stores it into the recorded field of the base object.
void AOTStreamedHeapLoader::TracingObjectLoader::drain_stack(Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
  while (!dfs_stack.is_empty()) {
    AOTHeapTraversalEntry entry = dfs_stack.pop();
    int pointee_object_index = entry._pointee_object_index;
    oop pointee_heap_object = materialize_object(pointee_object_index, dfs_stack, CHECK);
    oop heap_object = heap_object_for_object_index(entry._base_object_index);
    if (_allow_gc) {
      // GC may observe the field; use a full oop store.
      heap_object->obj_field_put(entry._heap_field_offset_bytes, pointee_heap_object);
    } else {
      // Before GC is allowed, the destination field is known to be uninitialized.
      heap_object->obj_field_put_access<IS_DEST_UNINITIALIZED>(entry._heap_field_offset_bytes, pointee_heap_object);
    }
  }
}
506
// Materializes object_index together with its entire transitive closure and
// returns the resulting heap object. First waits out any thread that is
// currently coordinating with the iterative loader. The intermediate result is
// handlized because draining the stack can allocate (and thus safepoint).
oop AOTStreamedHeapLoader::TracingObjectLoader::materialize_object_transitive(int object_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
  assert_locked_or_safepoint(AOTHeapLoading_lock);
  while (_waiting_for_iterator) {
    wait_for_iterator();
  }

  auto handlized_materialize_object = [&](TRAPS) {
    oop obj = materialize_object(object_index, dfs_stack, CHECK_(Handle()));
    return Handle(THREAD, obj);
  };

  Handle result = handlized_materialize_object(CHECK_NULL);
  drain_stack(dfs_stack, CHECK_NULL);

  return result();
}
523
524 oop AOTStreamedHeapLoader::TracingObjectLoader::materialize_root(int root_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
525 int root_object_index = object_index_for_root_index(root_index);
526 oop root = materialize_object_transitive(root_object_index, dfs_stack, CHECK_NULL);
527 install_root(root_index, root);
528
529 return root;
530 }
531
532 int oop_handle_cmp(const void* left, const void* right) {
533 oop* left_handle = *(oop**)left;
534 oop* right_handle = *(oop**)right;
535
536 if (right_handle > left_handle) {
537 return -1;
538 } else if (left_handle > right_handle) {
539 return 1;
540 }
541
542 return 0;
543 }
544
545 // The range is inclusive
546 void AOTStreamedHeapLoader::IterativeObjectLoader::initialize_range(int first_object_index, int last_object_index, TRAPS) {
547 for (int i = first_object_index; i <= last_object_index; ++i) {
548 oopDesc* archive_object = archive_object_for_object_index(i);
549 markWord mark = archive_object->mark();
550 bool string_intern = mark.is_marked();
551 if (string_intern) {
552 int value_object_index = archived_string_value_object_index(archive_object);
553 if (value_object_index == i + 1) {
554 // Interned strings are eagerly materialized in the allocation phase, so there is
555 // nothing else to do for interned strings here for the string nor its value array.
556 i++;
557 }
558 continue;
559 }
560 size_t size = archive_object_size(archive_object);
561 oop heap_object = heap_object_for_object_index(i);
562 copy_object_eager_linking(archive_object, heap_object, size);
563 }
564 }
565
566 // The range is inclusive
// The range is inclusive. Materializes every object in the range in two phases:
// first all objects are allocated (interned strings fully materialized), then
// initialize_range() fills in payloads — skipping any objects that a lazy
// (tracing) loader already initialized, which must not be mutated again.
// Returns the number of heap words materialized.
size_t AOTStreamedHeapLoader::IterativeObjectLoader::materialize_range(int first_object_index, int last_object_index, TRAPS) {
  GrowableArrayCHeap<int, mtClassShared> lazy_object_indices(0);
  size_t materialized_words = 0;

  for (int i = first_object_index; i <= last_object_index; ++i) {
    oopDesc* archive_object = archive_object_for_object_index(i);
    markWord mark = archive_object->mark();

    // The markWord is marked if the object is a String and it should be interned,
    // make sure to unmark it before allocating memory for the object.
    bool string_intern = mark.is_marked();
    mark = mark.set_unmarked();

    size_t size = archive_object_size(archive_object);
    materialized_words += size;

    oop heap_object = heap_object_for_object_index(i);
    if (heap_object != nullptr) {
      // Lazy loading has already initialized the object; we must not mutate it
      lazy_object_indices.append(i);
      continue;
    }

    if (!string_intern) {
      // The normal case; no lazy loading has loaded the object yet
      heap_object = allocate_object(archive_object, mark, size, CHECK_0);
      set_heap_object_for_object_index(i, heap_object);
      continue;
    }

    // Eagerly materialize interned strings to ensure that objects earlier than the string
    // in a batch get linked to the intended interned string, and not a copy.
    int value_object_index = archived_string_value_object_index(archive_object);

    bool is_normal_interned_string = value_object_index == i + 1;

    if (value_object_index < first_object_index) {
      // If materialized in a previous batch, the value should already be allocated and initialized.
      assert(heap_object_for_object_index(value_object_index) != nullptr, "should be materialized");
    } else {
      // Materialize the value object.
      oopDesc* archive_value_object = archive_object_for_object_index(value_object_index);
      markWord value_mark = archive_value_object->mark();
      size_t value_size = archive_object_size(archive_value_object);
      oop value_heap_object;

      if (is_normal_interned_string) {
        // The common case: the value is next to the string. This happens when only the interned
        // string points to its value character array.
        assert(value_object_index <= last_object_index, "Must be within this batch: %d <= %d", value_object_index, last_object_index);
        value_heap_object = allocate_object(archive_value_object, value_mark, value_size, CHECK_0);
        set_heap_object_for_object_index(value_object_index, value_heap_object);
        materialized_words += value_size;
      } else {
        // In the uncommon case, multiple strings point to the value of an interned string.
        // The string can then be earlier in the batch.
        assert(value_object_index < i, "surprising index");
        value_heap_object = heap_object_for_object_index(value_object_index);
      }

      copy_object_eager_linking(archive_value_object, value_heap_object, value_size);
    }
    // Allocate and link the string.
    heap_object = allocate_object(archive_object, mark, size, CHECK_0);
    copy_object_eager_linking(archive_object, heap_object, size);

    assert(java_lang_String::value(heap_object) == heap_object_for_object_index(value_object_index), "Linker should have linked this correctly");

    // Replace the string with interned string
    heap_object = StringTable::intern(heap_object, CHECK_0);
    set_heap_object_for_object_index(i, heap_object);

    if (is_normal_interned_string) {
      // Skip over the string value, already materialized
      i++;
    }
  }

  if (lazy_object_indices.is_empty()) {
    // Normal case; no sprinkled lazy objects in the root subgraph
    initialize_range(first_object_index, last_object_index, CHECK_0);
  } else {
    // The user lazy initialized some objects that are already initialized; we have to initialize around them
    // to make sure they are not mutated.
    int previous_object_index = first_object_index - 1; // Exclusive start of initialization slice
    for (int i = 0; i < lazy_object_indices.length(); ++i) {
      int lazy_object_index = lazy_object_indices.at(i);
      int slice_start_object_index = previous_object_index;
      int slice_end_object_index = lazy_object_index;

      if (slice_end_object_index - slice_start_object_index > 1) { // Both markers are exclusive
        initialize_range(slice_start_object_index + 1, slice_end_object_index - 1, CHECK_0);
      }
      previous_object_index = lazy_object_index;
    }
    // Process tail range
    if (last_object_index - previous_object_index > 0) {
      initialize_range(previous_object_index + 1, last_object_index, CHECK_0);
    }
  }

  return materialized_words;
}
670
// Returns true while the iterative loader still has archived roots to process.
bool AOTStreamedHeapLoader::IterativeObjectLoader::has_more() {
  return _current_root_index < _num_roots;
}
674
// Materializes the next batch of roots. Roots are accumulated into a batch
// until the batch covers at least ~128 not-yet-materialized objects; the
// objects are materialized with the lock temporarily dropped, and then all
// roots of the batch are published.
void AOTStreamedHeapLoader::IterativeObjectLoader::materialize_next_batch(TRAPS) {
  assert(has_more(), "only materialize if there is something to materialize");

  int min_batch_objects = 128;
  int from_root_index = _current_root_index;
  int max_to_root_index = _num_roots - 1;
  int until_root_index = from_root_index;
  int highest_object_index;

  // Expand the batch size from one root, to N roots until we cross 128 objects in total
  for (;;) {
    highest_object_index = highest_object_index_for_root_index(until_root_index);
    if (highest_object_index - _previous_batch_last_object_index >= min_batch_objects) {
      break;
    }
    if (until_root_index == max_to_root_index) {
      break;
    }
    until_root_index++;
  }

  oop root = nullptr;

  // Materialize objects if necessary, representing the transitive closure of the root
  if (highest_object_index > _previous_batch_last_object_index) {
    while (_swapping_root_format) {
      // When the roots are being upgraded to use handles, it is not safe to racingly
      // iterate over the object; we must wait. Setting the current batch last object index
      // to something other than the previous batch last object index indicates to the
      // root swapping that there is current iteration ongoing.
      AOTHeapLoading_lock->wait();
    }
    int first_object_index = _previous_batch_last_object_index + 1;
    _current_batch_last_object_index = highest_object_index;
    size_t allocated_words;
    {
      // Drop the lock while materializing; lazy loaders coordinate through the
      // batch index fields set above.
      MutexUnlocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);
      allocated_words = materialize_range(first_object_index, highest_object_index, CHECK);
    }
    _allocated_words += allocated_words;
    _previous_batch_last_object_index = _current_batch_last_object_index;
    if (_waiting_for_iterator) {
      // If tracer is waiting, let it know at the next point of unlocking that the root
      // set it waited for has been processed now.
      AOTHeapLoading_lock->notify_all();
    }
  }

  // Install the root
  for (int i = from_root_index; i <= until_root_index; ++i) {
    int root_object_index = object_index_for_root_index(i);
    root = heap_object_for_object_index(root_object_index);
    install_root(i, root);
    ++_current_root_index;
  }
}
731
// Materializes as much of the archived heap as fits in the pre-GC allocation
// budget. Returns true if everything was materialized before GC was allowed.
bool AOTStreamedHeapLoader::materialize_early(TRAPS) {
  Ticks start = Ticks::now();

  // Only help with early materialization from the AOT thread if the heap archive can be allocated
  // without the need for a GC. Otherwise, do lazy loading until GC is enabled later in the bootstrapping.
  size_t bootstrap_max_memory = Universe::heap()->bootstrap_max_memory();
  size_t bootstrap_min_memory = MAX2(_heap_region_used, 2 * M);

  size_t before_gc_materialize_budget_bytes = (bootstrap_max_memory > bootstrap_min_memory) ? bootstrap_max_memory - bootstrap_min_memory : 0;
  size_t before_gc_materialize_budget_words = before_gc_materialize_budget_bytes / HeapWordSize;

  log_info(aot, heap)("Max bootstrapping memory: %zuM, min bootstrapping memory: %zuM, selected budget: %zuM",
                      bootstrap_max_memory / M, bootstrap_min_memory / M, before_gc_materialize_budget_bytes / M);

  while (IterativeObjectLoader::has_more()) {
    // Stop early materialization once GC becomes allowed or the budget is spent.
    if (_allow_gc || _allocated_words > before_gc_materialize_budget_words) {
      log_info(aot, heap)("Early object materialization interrupted at root %d", _current_root_index);
      break;
    }

    IterativeObjectLoader::materialize_next_batch(CHECK_false);
  }

  _early_materialization_time_ns = (Ticks::now() - start).nanoseconds();

  bool finished_before_gc_allowed = !_allow_gc && !IterativeObjectLoader::has_more();

  return finished_before_gc_allowed;
}
761
762 void AOTStreamedHeapLoader::materialize_late(TRAPS) {
763 Ticks start = Ticks::now();
764
765 // Continue materializing with GC allowed
766
767 while (IterativeObjectLoader::has_more()) {
768 IterativeObjectLoader::materialize_next_batch(CHECK);
769 }
770
771 _late_materialization_time_ns = (Ticks::now() - start).nanoseconds();
772 }
773
// Tears down materialization state once loading is finished: releases any
// OopStorage handles backing the object table, frees the table itself, and
// unmaps the archived heap and bitmap regions. Caller must hold
// AOTHeapLoading_lock (it is waited on below).
void AOTStreamedHeapLoader::cleanup() {
  // First ensure there is no concurrent tracing going on
  while (_waiting_for_iterator) {
    AOTHeapLoading_lock->wait();
  }

  Ticks start = Ticks::now();

  // Remove OopStorage roots
  if (_objects_are_handles) {
    size_t num_handles = _num_archived_objects;
    // Skip the null entry
    oop** handles = ((oop**)_object_index_to_heap_object_table) + 1;
    // Sort the handles so that oop storage can release them faster
    qsort(handles, num_handles, sizeof(oop*), (int (*)(const void*, const void*))oop_handle_cmp);
    size_t num_null_handles = 0;
    // Walk from the back; null-clear each live handle's slot. Assumes
    // oop_handle_cmp sorts null handles to the front, so the first null seen
    // from the back means everything before it is also null — TODO confirm
    // against oop_handle_cmp.
    for (size_t handles_remaining = num_handles; handles_remaining != 0; --handles_remaining) {
      oop* handle = handles[handles_remaining - 1];
      if (handle == nullptr) {
        num_null_handles = handles_remaining;
        break;
      }
      NativeAccess<>::oop_store(handle, nullptr);
    }
    // Bulk-release only the non-null tail of the sorted handle array.
    Universe::vm_global()->release(&handles[num_null_handles], num_handles - num_null_handles);
  }

  FREE_C_HEAP_ARRAY(void*, _object_index_to_heap_object_table);

  // Unmap regions
  FileMapInfo::current_info()->unmap_region(AOTMetaspace::hp);
  FileMapInfo::current_info()->unmap_region(AOTMetaspace::bm);

  _cleanup_materialization_time_ns = (Ticks::now() - start).nanoseconds();

  log_statistics();
}
811
812 void AOTStreamedHeapLoader::log_statistics() {
813 uint64_t total_duration_us = (Ticks::now() - _materialization_start_ticks).microseconds();
814 const bool is_async = _loading_all_objects && !AOTEagerlyLoadObjects;
815 const char* const async_or_sync = is_async ? "async" : "sync";
816 log_info(aot, heap)("start to finish materialization time: " UINT64_FORMAT "us",
817 total_duration_us);
818 log_info(aot, heap)("early object materialization time (%s): " UINT64_FORMAT "us",
819 async_or_sync, _early_materialization_time_ns / 1000);
820 log_info(aot, heap)("late object materialization time (%s): " UINT64_FORMAT "us",
821 async_or_sync, _late_materialization_time_ns / 1000);
822 log_info(aot, heap)("object materialization cleanup time (%s): " UINT64_FORMAT "us",
823 async_or_sync, _cleanup_materialization_time_ns / 1000);
824 log_info(aot, heap)("final object materialization time stall (sync): " UINT64_FORMAT "us",
825 _final_materialization_time_ns / 1000);
826 log_info(aot, heap)("bootstrapping lazy materialization time (sync): " UINT64_FORMAT "us",
827 _accumulated_lazy_materialization_time_ns / 1000);
828
829 uint64_t sync_time = _final_materialization_time_ns + _accumulated_lazy_materialization_time_ns;
830 uint64_t async_time = _early_materialization_time_ns + _late_materialization_time_ns + _cleanup_materialization_time_ns;
831
832 if (!is_async) {
833 sync_time += async_time;
834 async_time = 0;
835 }
836
837 log_info(aot, heap)("sync materialization time: " UINT64_FORMAT "us",
838 sync_time / 1000);
839
840 log_info(aot, heap)("async materialization time: " UINT64_FORMAT "us",
841 async_time / 1000);
842
843 uint64_t iterative_time = (uint64_t)(is_async ? async_time : sync_time);
844 uint64_t materialized_bytes = _allocated_words * HeapWordSize;
845 log_info(aot, heap)("%s materialized " UINT64_FORMAT "K (" UINT64_FORMAT "M/s)", async_or_sync,
846 materialized_bytes / 1024, uint64_t(materialized_bytes * UCONST64(1'000'000'000) / M / iterative_time));
847 }
848
// AOT thread entry point for asynchronous materialization: runs the budgeted
// early phase, waits for GC to be enabled, then drains the rest and cleans up.
void AOTStreamedHeapLoader::materialize_objects() {
  // We cannot handle any exception when materializing roots. Exits the VM.
  EXCEPTION_MARK

  // Objects are laid out in DFS order; DFS traverse the roots by linearly walking all objects
  HandleMark hm(THREAD);

  // Early materialization with a budget before GC is allowed
  MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);

  // The early-finished return value is intentionally ignored: whether or not
  // the budget sufficed, we block in await_gc_enabled() until enable_gc() runs.
  materialize_early(CHECK);
  await_gc_enabled();
  materialize_late(CHECK);
  // Notify materialization is done
  AOTHeapLoading_lock->notify_all();
  cleanup();
}
866
867 void AOTStreamedHeapLoader::switch_object_index_to_handle(int object_index) {
868 oop heap_object = cast_to_oop(_object_index_to_heap_object_table[object_index]);
869 if (heap_object == nullptr) {
870 return;
871 }
872
873 oop* handle = Universe::vm_global()->allocate();
874 NativeAccess<>::oop_store(handle, heap_object);
875 _object_index_to_heap_object_table[object_index] = handle;
876 }
877
// Transitions the loader into GC-enabled mode. Quiesces both concurrent
// loaders (the tracing loader and the batch iterator), upgrades the object
// table to GC-safe handles if materialization is still in progress, and then
// releases everything, letting materialization continue with GC allowed.
void AOTStreamedHeapLoader::enable_gc() {
  if (AOTEagerlyLoadObjects && !IterativeObjectLoader::has_more()) {
    // Everything was loaded eagerly at early startup
    return;
  }

  // We cannot handle any exception when materializing roots. Exits the VM.
  EXCEPTION_MARK

  MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);

  // First wait until no tracing is active
  while (_waiting_for_iterator) {
    AOTHeapLoading_lock->wait();
  }

  // Lock further tracing from starting
  _waiting_for_iterator = true;

  // Record iterator progress
  int num_handles = (int)_num_archived_objects;

  // Lock further iteration from starting
  _swapping_root_format = true;

  // Then wait for the iterator to stop
  // (equal batch indices indicate no batch iteration is currently in flight)
  while (_previous_batch_last_object_index != _current_batch_last_object_index) {
    AOTHeapLoading_lock->wait();
  }

  if (IterativeObjectLoader::has_more()) {
    // If there is more to be materialized, we have to upgrade the object index
    // to object mapping to use handles. If there isn't more to materialize, the
    // handles will no longer be used; they are only used to materialize objects.

    // Index 0 is the reserved null entry, so start at 1.
    for (int i = 1; i <= num_handles; ++i) {
      // Upgrade the roots to use handles
      switch_object_index_to_handle(i);
    }

    // From now on, accessing the object table must be done through a handle.
    _objects_are_handles = true;
  }

  // Unlock tracing
  _waiting_for_iterator = false;

  // Unlock iteration
  _swapping_root_format = false;

  _allow_gc = true;

  // Wake the AOT thread blocked in await_gc_enabled() and anyone waiting on
  // the protocol flags cleared above.
  AOTHeapLoading_lock->notify_all();

  if (AOTEagerlyLoadObjects && IterativeObjectLoader::has_more()) {
    materialize_late(CHECK);
    cleanup();
  }
}
937
// Thin delegation to AOTThread for materializing its thread object.
void AOTStreamedHeapLoader::materialize_thread_object() {
  AOTThread::materialize_thread_object();
}
941
// Synchronization point ensuring all roots are materialized before the loader
// is relied upon: either waits for the asynchronous AOT thread to drain the
// stream, or (in lazy mode) forces materialization of every root directly.
// Records the time the caller stalled here.
void AOTStreamedHeapLoader::finish_materialize_objects() {
  Ticks start = Ticks::now();

  if (_loading_all_objects) {
    MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);
    // Wait for the AOT thread to finish
    while (IterativeObjectLoader::has_more()) {
      AOTHeapLoading_lock->wait();
    }
  } else {
    assert(!AOTEagerlyLoadObjects, "sanity");
    assert(_current_root_index == 0, "sanity");
    // Without the full module graph we have done only lazy tracing materialization.
    // Ensure all roots are processed here by triggering root loading on every root.
    for (int i = 0; i < _num_roots; ++i) {
      get_root(i);
    }
    cleanup();
  }

  _final_materialization_time_ns = (Ticks::now() - start).nanoseconds();
}
964
965 void account_lazy_materialization_time_ns(uint64_t time, const char* description, int index) {
966 AtomicAccess::add(&_accumulated_lazy_materialization_time_ns, time);
967 log_debug(aot, heap)("Lazy materialization of %s: %d end (" UINT64_FORMAT " us of " UINT64_FORMAT " us)", description, index, time / 1000, _accumulated_lazy_materialization_time_ns / 1000);
968 }
969
// Initialize an empty array of AOT heap roots; materialize them lazily
void AOTStreamedHeapLoader::initialize() {
  EXCEPTION_MARK

  _materialization_start_ticks = Ticks::now();

  FileMapInfo::current_info()->map_bitmap_region();

  _heap_region = FileMapInfo::current_info()->region_at(AOTMetaspace::hp);
  _bitmap_region = FileMapInfo::current_info()->region_at(AOTMetaspace::bm);

  assert(_heap_region->used() > 0, "empty heap archive?");

  _is_in_use = true;

  // archived roots are at this offset in the stream.
  size_t roots_offset = FileMapInfo::current_info()->streamed_heap()->roots_offset();
  size_t forwarding_offset = FileMapInfo::current_info()->streamed_heap()->forwarding_offset();
  size_t root_highest_object_index_table_offset = FileMapInfo::current_info()->streamed_heap()->root_highest_object_index_table_offset();
  _num_archived_objects = FileMapInfo::current_info()->streamed_heap()->num_archived_objects();

  // The first int is the length of the array
  _roots_archive = ((int*)(((address)_heap_region->mapped_base()) + roots_offset)) + 1;
  _num_roots = _roots_archive[-1];
  _heap_region_used = _heap_region->used();

  // We can't retire a TLAB until the filler klass is set; set it to the archived object klass.
  CollectedHeap::set_filler_object_klass(vmClasses::Object_klass());

  // Live roots array; entries start null and are filled in as materialized.
  objArrayOop roots = oopFactory::new_objectArray(_num_roots, CHECK);
  _roots = OopHandle(Universe::vm_global(), roots);

  _object_index_to_buffer_offset_table = (size_t*)(((address)_heap_region->mapped_base()) + forwarding_offset);
  // We allocate the first entry for "null"
  _object_index_to_heap_object_table = NEW_C_HEAP_ARRAY(void*, _num_archived_objects + 1, mtClassShared);
  Copy::zero_to_bytes(_object_index_to_heap_object_table, (_num_archived_objects + 1) * sizeof(void*));

  _root_highest_object_index_table = (int*)(((address)_heap_region->mapped_base()) + root_highest_object_index_table_offset);

  // The oopmap bitmap for the archived heap lives inside the bitmap region.
  address start = (address)(_bitmap_region->mapped_base()) + _heap_region->oopmap_offset();
  _oopmap = BitMapView((BitMap::bm_word_t*)start, _heap_region->oopmap_size_in_bits());


  if (FLAG_IS_DEFAULT(AOTEagerlyLoadObjects)) {
    // Concurrency will not help much if there are no extra cores available.
    FLAG_SET_ERGO(AOTEagerlyLoadObjects, os::initial_active_processor_count() <= 1);
  }

  // If the full module graph is not available or the JVMTI class file load hook is on, we
  // will prune the object graph to not include cached objects in subgraphs that are not intended
  // to be loaded.
  _loading_all_objects = CDSConfig::is_using_full_module_graph() && !JvmtiExport::should_post_class_file_load_hook();
  if (!_loading_all_objects) {
    // When not using FMG, fall back to tracing materialization
    FLAG_SET_ERGO(AOTEagerlyLoadObjects, false);
    return;
  }

  if (AOTEagerlyLoadObjects) {
    // Objects are laid out in DFS order; DFS traverse the roots by linearly walking all objects
    HandleMark hm(THREAD);

    // Early materialization with a budget before GC is allowed
    MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);

    bool finished_before_gc_allowed = materialize_early(CHECK);
    if (finished_before_gc_allowed) {
      cleanup();
    }
  } else {
    // Hand off remaining materialization to the dedicated AOT thread.
    AOTThread::initialize();
  }
}
1043
// Materializes (or fetches, if already materialized) the heap object for the
// given root index, using tracing (DFS) materialization under the loader lock.
// The time spent — including any lock wait — is accounted as lazy
// materialization time.
oop AOTStreamedHeapLoader::materialize_root(int root_index) {
  Ticks start = Ticks::now();
  // We cannot handle any exception when materializing a root. Exits the VM.
  EXCEPTION_MARK
  Stack<AOTHeapTraversalEntry, mtClassShared> dfs_stack;
  HandleMark hm(THREAD);

  oop result;
  {
    MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);

    // Re-check under the lock: another thread may have materialized this root
    // while we were waiting.
    oop root = oop_cast<refArrayOop>(_roots.resolve())->obj_at(root_index);

    if (root != nullptr) {
      // The root has already been materialized
      result = root;
    } else {
      // The root has not been materialized, start tracing materialization
      result = TracingObjectLoader::materialize_root(root_index, dfs_stack, CHECK_NULL);
    }
  }

  uint64_t duration = (Ticks::now() - start).nanoseconds();

  account_lazy_materialization_time_ns(duration, "root", root_index);

  return result;
}
1072
// Returns the heap object for a root index, materializing it on demand.
// Returns nullptr if the root has been cleared (see clear_root()).
oop AOTStreamedHeapLoader::get_root(int index) {
  oop result = oop_cast<refArrayOop>(_roots.resolve())->obj_at(index);
  if (result == nullptr) {
    // Materialize root
    result = materialize_root(index);
  }
  if (result == _roots.resolve()) {
    // A self-reference to the roots array acts as a sentinel object for null,
    // indicating that the root has been cleared.
    result = nullptr;
  }
  // Acquire the root transitive object payload
  // (pairs with the materializing thread's publication of the object graph)
  OrderAccess::acquire();
  return result;
}
1088
1089 void AOTStreamedHeapLoader::clear_root(int index) {
1090 // Self-reference to the roots array acts as a sentinel object for null,
1091 // indicating that the root has been cleared.
1092 objArrayOop(_roots.resolve())->obj_at_put(index, _roots.resolve());
1093 }
1094
1095 void AOTStreamedHeapLoader::await_gc_enabled() {
1096 while (!_allow_gc) {
1097 AOTHeapLoading_lock->wait();
1098 }
1099 }
1100
// Starts streaming the archived heap region from the AOT cache file.
void AOTStreamedHeapLoader::finish_initialization(FileMapInfo* static_mapinfo) {
  static_mapinfo->stream_heap_region();
}
1104
// Builds an iterator over the buffered (not yet materialized) archive objects
// for AOT map logging. Objects are addressed by their DFS index via the
// buffer-offset table.
AOTMapLogger::OopDataIterator* AOTStreamedHeapLoader::oop_iterator(FileMapInfo* info, address buffer_start, address buffer_end) {
  class StreamedLoaderOopIterator : public AOTStreamedHeapOopIterator {
  public:
    StreamedLoaderOopIterator(address buffer_start,
                              int num_archived_objects)
      : AOTStreamedHeapOopIterator(buffer_start, num_archived_objects) {}

    // Captures logging metadata for the object at the given DFS index.
    // The "requested" address and target location are both reported as the
    // buffer offset, since streamed objects have no fixed requested address.
    AOTMapLogger::OopData capture(int dfs_index) override {
      size_t buffered_offset = buffer_offset_for_object_index(dfs_index);
      address buffered_addr = _buffer_start + buffered_offset;
      oopDesc* raw_oop = (oopDesc*)buffered_addr;
      size_t size = archive_object_size(raw_oop);

      intptr_t target_location = (intptr_t)buffered_offset;
      uint32_t narrow_location = checked_cast<uint32_t>(dfs_index);
      Klass* klass = raw_oop->klass();

      address requested_addr = (address)buffered_offset;

      return { buffered_addr,
               requested_addr,
               target_location,
               narrow_location,
               raw_oop,
               klass,
               size,
               false };
    }

    // Captures metadata for every archived root object. Caller owns the
    // returned array.
    GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
      GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* result = new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();

      for (int i = 0; i < _num_roots; ++i) {
        int object_index = object_index_for_root_index(i);
        result->append(capture(object_index));
      }

      return result;
    }
  };

  assert(_is_in_use, "printing before initializing?");

  return new StreamedLoaderOopIterator(buffer_start, (int)info->streamed_heap()->num_archived_objects());
}
1150
1151 #endif // INCLUDE_CDS_JAVA_HEAP