1 /*
2 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotMetaspace.hpp"
26 #include "cds/aotStreamedHeapLoader.hpp"
27 #include "cds/aotThread.hpp"
28 #include "cds/cdsConfig.hpp"
29 #include "cds/filemap.hpp"
30 #include "cds/heapShared.inline.hpp"
31 #include "classfile/classLoaderDataShared.hpp"
32 #include "classfile/javaClasses.inline.hpp"
33 #include "classfile/stringTable.hpp"
34 #include "classfile/vmClasses.hpp"
35 #include "gc/shared/collectedHeap.inline.hpp"
36 #include "gc/shared/oopStorage.inline.hpp"
37 #include "gc/shared/oopStorageSet.inline.hpp"
38 #include "logging/log.hpp"
39 #include "memory/iterator.inline.hpp"
40 #include "memory/oopFactory.hpp"
41 #include "oops/access.inline.hpp"
42 #include "oops/objArrayOop.inline.hpp"
43 #include "oops/oop.inline.hpp"
44 #include "runtime/globals.hpp"
45 #include "runtime/globals_extension.hpp"
46 #include "runtime/handles.inline.hpp"
47 #include "runtime/java.hpp"
48 #include "runtime/mutex.hpp"
49 #include "runtime/thread.hpp"
50 #include "utilities/bitMap.inline.hpp"
51 #include "utilities/exceptions.hpp"
52 #include "utilities/globalDefinitions.hpp"
53 #include "utilities/stack.inline.hpp"
54 #include "utilities/ticks.hpp"
55
56 #include <type_traits>
57
58 #if INCLUDE_CDS_JAVA_HEAP
59
60 FileMapRegion* AOTStreamedHeapLoader::_heap_region;
61 FileMapRegion* AOTStreamedHeapLoader::_bitmap_region;
62 int* AOTStreamedHeapLoader::_roots_archive;
63 OopHandle AOTStreamedHeapLoader::_roots;
64 BitMapView AOTStreamedHeapLoader::_oopmap;
65 bool AOTStreamedHeapLoader::_is_in_use;
66 int AOTStreamedHeapLoader::_previous_batch_last_object_index;
67 int AOTStreamedHeapLoader::_current_batch_last_object_index;
68 int AOTStreamedHeapLoader::_current_root_index;
69 size_t AOTStreamedHeapLoader::_allocated_words;
70 bool AOTStreamedHeapLoader::_allow_gc;
71 bool AOTStreamedHeapLoader::_objects_are_handles;
72 size_t AOTStreamedHeapLoader::_num_archived_objects;
73 int AOTStreamedHeapLoader::_num_roots;
74 size_t AOTStreamedHeapLoader::_heap_region_used;
75 bool AOTStreamedHeapLoader::_loading_all_objects;
76
77 size_t* AOTStreamedHeapLoader::_object_index_to_buffer_offset_table;
78 void** AOTStreamedHeapLoader::_object_index_to_heap_object_table;
79 int* AOTStreamedHeapLoader::_root_highest_object_index_table;
80
81 bool AOTStreamedHeapLoader::_waiting_for_iterator;
82 bool AOTStreamedHeapLoader::_swapping_root_format;
83
84 static uint64_t _early_materialization_time_ns = 0;
85 static uint64_t _late_materialization_time_ns = 0;
86 static uint64_t _final_materialization_time_ns = 0;
87 static uint64_t _cleanup_materialization_time_ns = 0;
88 static volatile uint64_t _accumulated_lazy_materialization_time_ns = 0;
89 static Ticks _materialization_start_ticks;
90
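// Objects in the streamed heap archive are identified by dense DFS object indices;
// index 0 is reserved for null. The tables below translate an object index to its
// offset in the mapped heap region and to its materialized heap object (stored as a
// raw oop, or as an OopStorage handle once _objects_are_handles is set).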
91 int AOTStreamedHeapLoader::object_index_for_root_index(int root_index) {
92 return _roots_archive[root_index];
93 }
94
95 int AOTStreamedHeapLoader::highest_object_index_for_root_index(int root_index) {
96 return _root_highest_object_index_table[root_index];
97 }
98
99 size_t AOTStreamedHeapLoader::buffer_offset_for_object_index(int object_index) {
100 return _object_index_to_buffer_offset_table[object_index];
101 }
102
103 oopDesc* AOTStreamedHeapLoader::archive_object_for_object_index(int object_index) {
104 size_t buffer_offset = buffer_offset_for_object_index(object_index);
105 address bottom = (address)_heap_region->mapped_base();
106 return (oopDesc*)(bottom + buffer_offset);
107 }
108
109 size_t AOTStreamedHeapLoader::buffer_offset_for_archive_object(oopDesc* archive_object) {
110 address bottom = (address)_heap_region->mapped_base();
111 return size_t(archive_object) - size_t(bottom);
112 }
113
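// The oopmap has one bit per potential reference slot in the archived payload:
// narrowOop-sized slots with compressed oops, HeapWord-sized slots otherwise.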
114 template <bool use_coops>
115 BitMap::idx_t AOTStreamedHeapLoader::obj_bit_idx_for_buffer_offset(size_t buffer_offset) {
116 if constexpr (use_coops) {
117 return BitMap::idx_t(buffer_offset / sizeof(narrowOop));
118 } else {
119 return BitMap::idx_t(buffer_offset / sizeof(HeapWord));
120 }
121 }
122
123 oop AOTStreamedHeapLoader::heap_object_for_object_index(int object_index) {
  assert(object_index >= 0 && object_index <= (int)_num_archived_objects,
         "Heap object index out of bounds: %d", object_index);
126
127 if (_objects_are_handles) {
128 oop* handle = (oop*)_object_index_to_heap_object_table[object_index];
129 if (handle == nullptr) {
130 return nullptr;
131 }
132 return NativeAccess<>::oop_load(handle);
133 } else {
134 return cast_to_oop(_object_index_to_heap_object_table[object_index]);
135 }
136 }
137
138 void AOTStreamedHeapLoader::set_heap_object_for_object_index(int object_index, oop heap_object) {
139 assert(heap_object_for_object_index(object_index) == nullptr, "Should only set once with this API");
140 if (_objects_are_handles) {
141 oop* handle = Universe::vm_global()->allocate();
142 NativeAccess<>::oop_store(handle, heap_object);
143 _object_index_to_heap_object_table[object_index] = (void*)handle;
144 } else {
145 _object_index_to_heap_object_table[object_index] = cast_from_oop<void*>(heap_object);
146 }
147 }
148
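// Archived reference fields hold object indices rather than real references. Read the
// String.value field with the width matching the oop encoding to get the object index
// of the value array.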
149 int AOTStreamedHeapLoader::archived_string_value_object_index(oopDesc* archive_object) {
150 assert(archive_object->klass() == vmClasses::String_klass(), "Must be an archived string");
151 address archive_string_value_addr = (address)archive_object + java_lang_String::value_offset();
152 return UseCompressedOops ? *(int*)archive_string_value_addr : (int)*(int64_t*)archive_string_value_addr;
153 }
154
155 static int archive_array_length(oopDesc* archive_array) {
156 return *(int*)(address(archive_array) + arrayOopDesc::length_offset_in_bytes());
157 }
158
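// Compute the word size of an archived object from its klass layout helper. Objects
// whose size cannot be derived from the layout helper have their size recorded in the
// word immediately preceding the object in the archive.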
159 static size_t archive_object_size(oopDesc* archive_object) {
160 Klass* klass = archive_object->klass();
161 int lh = klass->layout_helper();
162
163 if (Klass::layout_helper_is_instance(lh)) {
164 // Instance
165 if (Klass::layout_helper_needs_slow_path(lh)) {
166 return ((size_t*)(archive_object))[-1];
167 } else {
168 size_t size = (size_t)Klass::layout_helper_size_in_bytes(lh) >> LogHeapWordSize;
169 if (UseCompactObjectHeaders && archive_object->mark().is_expanded() && klass->expand_for_hash(archive_object, archive_object->mark())) {
170 size = align_object_size(size + 1);
171 }
172 return size;
173 }
174 } else if (Klass::layout_helper_is_array(lh)) {
175 // Array
176 size_t size_in_bytes;
177 size_t array_length = (size_t)archive_array_length(archive_object);
178 size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
179 size_in_bytes += (size_t)Klass::layout_helper_header_size(lh);
180
181 size_t size = align_up(size_in_bytes, (size_t)MinObjAlignmentInBytes) / HeapWordSize;
182 if (UseCompactObjectHeaders && archive_object->mark().is_expanded() && klass->expand_for_hash(archive_object, archive_object->mark())) {
183 size = align_object_size(size + 1);
184 }
185 return size;
186 } else {
187 // Other
188 return ((size_t*)(archive_object))[-1];
189 }
190 }
191
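// Allocate a heap object of the same klass and size as the archived object and install
// the supplied (unmarked) markWord. The payload is filled in separately by the copy
// routines below.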
192 oop AOTStreamedHeapLoader::allocate_object(oopDesc* archive_object, markWord mark, size_t size, TRAPS) {
193 assert(!archive_object->is_stackChunk(), "no such objects are archived");
194
195 oop heap_object;
196
197 Klass* klass = archive_object->klass();
198 assert(!(UseCompactObjectHeaders && mark.is_hashed_not_expanded()), "Must not be hashed/not-expanded");
199 if (klass->is_mirror_instance_klass()) {
200 size_t base_size = size;
201 assert(!(UseCompactObjectHeaders && mark.is_not_hashed_expanded()), "should not happen");
202 heap_object = Universe::heap()->class_allocate(klass, size, base_size, CHECK_NULL);
203 } else if (klass->is_instance_klass()) {
204 heap_object = Universe::heap()->obj_allocate(klass, size, CHECK_NULL);
205 } else {
206 assert(klass->is_array_klass(), "must be");
207 int length = archive_array_length(archive_object);
208 bool do_zero = klass->is_objArray_klass();
209 heap_object = Universe::heap()->array_allocate(klass, size, length, do_zero, CHECK_NULL);
210 }
211
212 heap_object->set_mark(mark);
213
214 return heap_object;
215 }
216
217 void AOTStreamedHeapLoader::install_root(int root_index, oop heap_object) {
218 objArrayOop roots = objArrayOop(_roots.resolve());
219 OrderAccess::release(); // Once the store below publishes an object, it can be concurrently picked up by another thread without using the lock
220 roots->obj_at_put(root_index, heap_object);
221 }
222
223 void AOTStreamedHeapLoader::TracingObjectLoader::wait_for_iterator() {
224 if (JavaThread::current()->is_active_Java_thread()) {
    // When the main thread has bootstrapped past the point of allowing safepoints,
    // we can and indeed have to perform a safepoint-checking wait.
227 AOTHeapLoading_lock->wait();
228 } else {
    // If the main thread has not been bootstrapped far enough, then we cannot and
    // also do not need to perform a safepoint-checking wait.
231 AOTHeapLoading_lock->wait_without_safepoint_check();
232 }
233 }
234
235 // Link object after copying in-place
236 template <typename LinkerT>
237 class AOTStreamedHeapLoader::InPlaceLinkingOopClosure : public BasicOopIterateClosure {
238 private:
239 oop _obj;
240 LinkerT _linker;
241
242 public:
243 InPlaceLinkingOopClosure(oop obj, LinkerT linker)
244 : _obj(obj),
245 _linker(linker) {
246 }
247
248 virtual void do_oop(oop* p) { do_oop_work(p, (int)*(intptr_t*)p); }
249 virtual void do_oop(narrowOop* p) { do_oop_work(p, *(int*)p); }
250
251 template <typename T>
252 void do_oop_work(T* p, int object_index) {
253 int p_offset = pointer_delta_as_int((address)p, cast_from_oop<address>(_obj));
254 oop pointee = _linker(p_offset, object_index);
255 if (pointee != nullptr) {
256 _obj->obj_field_put_access<IS_DEST_UNINITIALIZED>((int)p_offset, pointee);
257 }
258 }
259 };
260
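// Copy the payload slots in [start_bit, end_bit) from the archived object to the heap
// object, guided by the oopmap: runs of primitive slots are copied with memcpy, while
// each reference slot is resolved through the linker so that only null or the intended
// linked oop is ever stored into the heap object.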
261 template <bool use_coops, typename LinkerT>
262 void AOTStreamedHeapLoader::copy_payload_carefully(oopDesc* archive_object,
263 oop heap_object,
264 BitMap::idx_t header_bit,
265 BitMap::idx_t start_bit,
266 BitMap::idx_t end_bit,
267 LinkerT linker) {
268 using RawElementT = std::conditional_t<use_coops, int32_t, int64_t>;
269 using OopElementT = std::conditional_t<use_coops, narrowOop, oop>;
270
271 BitMap::idx_t unfinished_bit = start_bit;
272 BitMap::idx_t next_reference_bit = _oopmap.find_first_set_bit(unfinished_bit, end_bit);
273
274 // Fill in heap object bytes
275 while (unfinished_bit < end_bit) {
276 assert(unfinished_bit >= start_bit && unfinished_bit < end_bit, "out of bounds copying");
277
278 // This is the address of the pointee inside the input stream
279 size_t payload_offset = unfinished_bit - header_bit;
280 RawElementT* archive_payload_addr = ((RawElementT*)archive_object) + payload_offset;
281 RawElementT* heap_payload_addr = cast_from_oop<RawElementT*>(heap_object) + payload_offset;
282
283 assert(heap_payload_addr >= cast_from_oop<RawElementT*>(heap_object) &&
284 (HeapWord*)heap_payload_addr < cast_from_oop<HeapWord*>(heap_object) + heap_object->size(),
285 "Out of bounds copying");
286
287 if (next_reference_bit > unfinished_bit) {
288 // Primitive bytes available
289 size_t primitive_elements = next_reference_bit - unfinished_bit;
290 size_t primitive_bytes = primitive_elements * sizeof(RawElementT);
291 ::memcpy(heap_payload_addr, archive_payload_addr, primitive_bytes);
292
293 unfinished_bit = next_reference_bit;
294 } else {
295 // Encountered reference
296 RawElementT* archive_p = (RawElementT*)archive_payload_addr;
297 OopElementT* heap_p = (OopElementT*)heap_payload_addr;
298 int pointee_object_index = (int)*archive_p;
299 int heap_p_offset = pointer_delta_as_int((address)heap_p, cast_from_oop<address>(heap_object));
300
301 // The object index is retrieved from the archive, not the heap object. This is
302 // important after GC is enabled. Concurrent GC threads may scan references in the
303 // heap for various reasons after this point. Therefore, it is not okay to first copy
304 // the object index from a reference location in the archived object payload to a
305 // corresponding location in the heap object payload, and then fix it up afterwards to
306 // refer to a heap object. This is why this code iterates carefully over object references
307 // in the archived object, linking them one by one, without clobbering the reference
308 // locations in the heap objects with anything other than transitions from null to the
309 // intended linked object.
310 oop obj = linker(heap_p_offset, pointee_object_index);
311 if (obj != nullptr) {
312 heap_object->obj_field_put(heap_p_offset, obj);
313 }
314
315 unfinished_bit++;
316 next_reference_bit = _oopmap.find_first_set_bit(unfinished_bit, end_bit);
317 }
318 }
319 }
320
321 template <bool use_coops, typename LinkerT>
322 void AOTStreamedHeapLoader::copy_object_impl(oopDesc* archive_object,
323 oop heap_object,
324 size_t size,
325 LinkerT linker) {
326 if (!_allow_gc) {
327 // Without concurrent GC running, we can copy incorrect object references
328 // and metadata references into the heap object and then fix them up in-place.
329 size_t offset = 1;
330 size_t payload_size = size - offset;
331 HeapWord* archive_start = ((HeapWord*)archive_object);
332 HeapWord* heap_start = cast_from_oop<HeapWord*>(heap_object);
333
334 Copy::disjoint_words(archive_start + offset, heap_start + offset, payload_size);
335
336 if (UseCompactObjectHeaders) {
      // The copying might have missed the first 4 bytes of payload/array length; copy them as well.
338 *(reinterpret_cast<jint*>(heap_start) + 1) = *(reinterpret_cast<jint*>(archive_start) + 1);
339 }
340
    // In-place linking reads the object indices stored in the heap object's reference
    // fields and patches them to refer to the corresponding heap objects. This can be
    // done because we just copied the payload of the object from the archive to the
    // heap object, including the reference object indices. However, this is only okay
    // to do before the GC can run. A concurrent GC thread might racingly read the
    // object payload after GC is enabled.
346 InPlaceLinkingOopClosure cl(heap_object, linker);
347 heap_object->oop_iterate(&cl);
348 HeapShared::remap_loaded_metadata(heap_object);
349 return;
350 }
351
352 // When a concurrent GC may be running, we take care not to copy incorrect oops,
353 // narrowOops or Metadata* into the heap objects. Transitions go from 0 to the
354 // intended runtime linked values only.
355 size_t word_scale = use_coops ? 2 : 1;
356 using RawElementT = std::conditional_t<use_coops, int32_t, int64_t>;
357
358 // Skip the markWord; it is set at allocation time
359 size_t header_size = (UseCompactObjectHeaders && use_coops) ? 1 : word_scale;
360
361 size_t buffer_offset = buffer_offset_for_archive_object(archive_object);
362 const BitMap::idx_t header_bit = obj_bit_idx_for_buffer_offset<use_coops>(buffer_offset);
363 const BitMap::idx_t start_bit = header_bit + header_size;
364 const BitMap::idx_t end_bit = header_bit + size * word_scale;
365
366 BitMap::idx_t curr_bit = start_bit;
367
368 if (UseCompactObjectHeaders && !use_coops) {
369 // Copy first 4 primitive bytes.
370 jint* archive_start = reinterpret_cast<jint*>(archive_object);
371 HeapWord* heap_start = cast_from_oop<HeapWord*>(heap_object);
372 *(reinterpret_cast<jint*>(heap_start) + 1) = *(archive_start + 1);
373 }
374
  // We are a bit paranoid about GC or other safepointing operations observing
  // shady metadata fields from the archive that do not point at real metadata.
  // We deal with this by explicitly reading the requested address from the
  // archive and translating it to a real Metadata pointer before writing it into
  // the heap object.
379 HeapShared::do_metadata_offsets(heap_object, [&](int metadata_offset) {
380 BitMap::idx_t metadata_field_idx = header_bit + (size_t)metadata_offset / sizeof(RawElementT);
381 BitMap::idx_t skip = word_scale;
382 assert(metadata_field_idx >= start_bit && metadata_field_idx + skip <= end_bit,
383 "Metadata field out of bounds");
384
385 // Copy payload before metadata field
386 copy_payload_carefully<use_coops>(archive_object,
387 heap_object,
388 header_bit,
389 curr_bit,
390 metadata_field_idx,
391 linker);
392
393 // Copy metadata field
394 Metadata* const archive_metadata = *(Metadata**)(uintptr_t(archive_object) + (size_t)metadata_offset);
395 Metadata* const runtime_metadata = archive_metadata != nullptr
396 ? (Metadata*)(address(archive_metadata) + AOTMetaspace::relocation_delta())
397 : nullptr;
398 assert(runtime_metadata == nullptr || AOTMetaspace::in_aot_cache(runtime_metadata), "Invalid metadata pointer");
399 DEBUG_ONLY(Metadata* const previous_metadata = heap_object->metadata_field(metadata_offset);)
400 assert(previous_metadata == nullptr || previous_metadata == runtime_metadata, "Should not observe transient values");
401 heap_object->metadata_field_put(metadata_offset, runtime_metadata);
402 curr_bit = metadata_field_idx + skip;
403 });
404
  // Copy the trailing payload after the last metadata field. This usually does
  // the bulk of the copying.
407 copy_payload_carefully<use_coops>(archive_object,
408 heap_object,
409 header_bit,
410 curr_bit,
411 end_bit,
412 linker);
413 }
414
415 void AOTStreamedHeapLoader::copy_object_eager_linking(oopDesc* archive_object, oop heap_object, size_t size) {
416 auto linker = [&](int p_offset, int pointee_object_index) {
417 oop obj = AOTStreamedHeapLoader::heap_object_for_object_index(pointee_object_index);
418 assert(pointee_object_index == 0 || obj != nullptr, "Eager object loading should only encounter already allocated links");
419 return obj;
420 };
421 if (UseCompressedOops) {
422 copy_object_impl<true>(archive_object, heap_object, size, linker);
423 } else {
424 copy_object_impl<false>(archive_object, heap_object, size, linker);
425 }
426 }
427
428 void AOTStreamedHeapLoader::TracingObjectLoader::copy_object_lazy_linking(int object_index,
429 oopDesc* archive_object,
430 oop heap_object,
431 size_t size,
432 Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack) {
433 auto linker = [&](int p_offset, int pointee_object_index) {
434 dfs_stack.push({pointee_object_index, object_index, p_offset});
435
    // The tracing linker defers linking: it records the reference in the DFS stack
    // and patches the field later when the stack is drained.
    // Returning null means don't link now.
439 };
440 if (UseCompressedOops) {
441 copy_object_impl<true>(archive_object, heap_object, size, linker);
442 } else {
443 copy_object_impl<false>(archive_object, heap_object, size, linker);
444 }
445 }
446
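// Allocate and copy a single archived object. Interned strings are special-cased: their
// value array is materialized first, the string is linked eagerly, and the result is
// replaced by the canonical entry from the StringTable.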
447 oop AOTStreamedHeapLoader::TracingObjectLoader::materialize_object_inner(int object_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
448 // Allocate object
449 oopDesc* archive_object = archive_object_for_object_index(object_index);
450 size_t size = archive_object_size(archive_object);
451 markWord mark = archive_object->mark();
452
  // The markWord is marked if the object is a String that should be interned;
  // make sure to unmark it before allocating memory for the object.
455 bool string_intern = mark.is_marked();
456 mark = mark.set_unmarked();
457
458 oop heap_object;
459
460 if (string_intern) {
461 int value_object_index = archived_string_value_object_index(archive_object);
462
463 // Materialize the value object.
464 (void)materialize_object(value_object_index, dfs_stack, CHECK_NULL);
465
466 // Allocate and link the string.
467 heap_object = allocate_object(archive_object, mark, size, CHECK_NULL);
468 copy_object_eager_linking(archive_object, heap_object, size);
469
470 assert(java_lang_String::value(heap_object) == heap_object_for_object_index(value_object_index), "Linker should have linked this correctly");
471
472 // Replace the string with interned string
473 heap_object = StringTable::intern(heap_object, CHECK_NULL);
474 } else {
475 heap_object = allocate_object(archive_object, mark, size, CHECK_NULL);
476
477 // Fill in object contents
478 copy_object_lazy_linking(object_index, archive_object, heap_object, size, dfs_stack);
479 }
480
481 // Install forwarding
482 set_heap_object_for_object_index(object_index, heap_object);
483
484 return heap_object;
485 }
486
487 oop AOTStreamedHeapLoader::TracingObjectLoader::materialize_object(int object_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
488 if (object_index <= _previous_batch_last_object_index) {
489 // The transitive closure of this object has been materialized; no need to do anything
490 return heap_object_for_object_index(object_index);
491 }
492
493 if (object_index <= _current_batch_last_object_index) {
494 // The AOTThread is currently materializing this object and its transitive closure; only need to wait for it to complete
495 _waiting_for_iterator = true;
496 while (object_index > _previous_batch_last_object_index) {
497 wait_for_iterator();
498 }
499 _waiting_for_iterator = false;
500
501 // Notify the AOT thread if it is waiting for tracing to finish
502 AOTHeapLoading_lock->notify_all();
    return heap_object_for_object_index(object_index);
504 }
505
506 oop heap_object = heap_object_for_object_index(object_index);
507 if (heap_object != nullptr) {
508 // Already materialized by mutator
509 return heap_object;
510 }
511
512 return materialize_object_inner(object_index, dfs_stack, THREAD);
513 }
514
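// Drain the deferred edges recorded by the tracing linker: materialize each pointee and
// patch the corresponding reference field in its base object.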
515 void AOTStreamedHeapLoader::TracingObjectLoader::drain_stack(Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
516 while (!dfs_stack.is_empty()) {
517 AOTHeapTraversalEntry entry = dfs_stack.pop();
518 int pointee_object_index = entry._pointee_object_index;
519 oop pointee_heap_object = materialize_object(pointee_object_index, dfs_stack, CHECK);
520 oop heap_object = heap_object_for_object_index(entry._base_object_index);
521 if (_allow_gc) {
522 heap_object->obj_field_put(entry._heap_field_offset_bytes, pointee_heap_object);
523 } else {
524 heap_object->obj_field_put_access<IS_DEST_UNINITIALIZED>(entry._heap_field_offset_bytes, pointee_heap_object);
525 }
526 }
527 }
528
529 oop AOTStreamedHeapLoader::TracingObjectLoader::materialize_object_transitive(int object_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
530 assert_locked_or_safepoint(AOTHeapLoading_lock);
531 while (_waiting_for_iterator) {
532 wait_for_iterator();
533 }
534
535 auto handlized_materialize_object = [&](TRAPS) {
536 oop obj = materialize_object(object_index, dfs_stack, CHECK_(Handle()));
537 return Handle(THREAD, obj);
538 };
539
540 Handle result = handlized_materialize_object(CHECK_NULL);
541 drain_stack(dfs_stack, CHECK_NULL);
542
543 return result();
544 }
545
546 oop AOTStreamedHeapLoader::TracingObjectLoader::materialize_root(int root_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
547 int root_object_index = object_index_for_root_index(root_index);
548 oop root = materialize_object_transitive(root_object_index, dfs_stack, CHECK_NULL);
549 install_root(root_index, root);
550
551 return root;
552 }
553
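// Comparator for sorting OopStorage handles by address so that cleanup() can release
// them more efficiently.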
554 int oop_handle_cmp(const void* left, const void* right) {
555 oop* left_handle = *(oop**)left;
556 oop* right_handle = *(oop**)right;
557
558 if (right_handle > left_handle) {
559 return -1;
560 } else if (left_handle > right_handle) {
561 return 1;
562 }
563
564 return 0;
565 }
566
567 // The range is inclusive
568 void AOTStreamedHeapLoader::IterativeObjectLoader::initialize_range(int first_object_index, int last_object_index, TRAPS) {
569 for (int i = first_object_index; i <= last_object_index; ++i) {
570 oopDesc* archive_object = archive_object_for_object_index(i);
571 markWord mark = archive_object->mark();
572 bool string_intern = mark.is_marked();
573 if (string_intern) {
574 int value_object_index = archived_string_value_object_index(archive_object);
575 if (value_object_index == i + 1) {
        // Interned strings are eagerly materialized in the allocation phase, so there is
        // nothing left to do here for either the string or its value array.
578 i++;
579 }
580 continue;
581 }
582 size_t size = archive_object_size(archive_object);
583 oop heap_object = heap_object_for_object_index(i);
584 copy_object_eager_linking(archive_object, heap_object, size);
585 }
586 }
587
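// Materialize a batch of consecutive archive objects: first allocate every object in the
// range (eagerly interning strings), then fill in the payloads with initialize_range,
// taking care not to touch objects that lazy tracing has already initialized.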
588 // The range is inclusive
589 size_t AOTStreamedHeapLoader::IterativeObjectLoader::materialize_range(int first_object_index, int last_object_index, TRAPS) {
590 GrowableArrayCHeap<int, mtClassShared> lazy_object_indices(0);
591 size_t materialized_words = 0;
592
593 for (int i = first_object_index; i <= last_object_index; ++i) {
594 oopDesc* archive_object = archive_object_for_object_index(i);
595 markWord mark = archive_object->mark();
596
    // The markWord is marked if the object is a String that should be interned;
    // make sure to unmark it before allocating memory for the object.
599 bool string_intern = mark.is_marked();
600 mark = mark.set_unmarked();
601
602 size_t size = archive_object_size(archive_object);
603 materialized_words += size;
604
605 oop heap_object = heap_object_for_object_index(i);
606 if (heap_object != nullptr) {
607 // Lazy loading has already initialized the object; we must not mutate it
608 lazy_object_indices.append(i);
609 continue;
610 }
611
612 if (!string_intern) {
      // The normal case; lazy loading has not materialized the object yet
614 heap_object = allocate_object(archive_object, mark, size, CHECK_0);
615 set_heap_object_for_object_index(i, heap_object);
616 continue;
617 }
618
619 // Eagerly materialize interned strings to ensure that objects earlier than the string
620 // in a batch get linked to the intended interned string, and not a copy.
621 int value_object_index = archived_string_value_object_index(archive_object);
622
623 bool is_normal_interned_string = value_object_index == i + 1;
624
625 if (value_object_index < first_object_index) {
626 // If materialized in a previous batch, the value should already be allocated and initialized.
627 assert(heap_object_for_object_index(value_object_index) != nullptr, "should be materialized");
628 } else {
629 // Materialize the value object.
630 oopDesc* archive_value_object = archive_object_for_object_index(value_object_index);
631 markWord value_mark = archive_value_object->mark();
632 size_t value_size = archive_object_size(archive_value_object);
633 oop value_heap_object;
634
635 if (is_normal_interned_string) {
636 // The common case: the value is next to the string. This happens when only the interned
637 // string points to its value character array.
638 assert(value_object_index <= last_object_index, "Must be within this batch: %d <= %d", value_object_index, last_object_index);
639 value_heap_object = allocate_object(archive_value_object, value_mark, value_size, CHECK_0);
640 set_heap_object_for_object_index(value_object_index, value_heap_object);
641 materialized_words += value_size;
642 } else {
        // In the uncommon case, multiple strings point to the value of an interned string.
        // The value can then appear earlier in the batch, next to an earlier string.
645 assert(value_object_index < i, "surprising index");
646 value_heap_object = heap_object_for_object_index(value_object_index);
647 }
648
649 copy_object_eager_linking(archive_value_object, value_heap_object, value_size);
650 }
651 // Allocate and link the string.
652 heap_object = allocate_object(archive_object, mark, size, CHECK_0);
653 copy_object_eager_linking(archive_object, heap_object, size);
654
655 assert(java_lang_String::value(heap_object) == heap_object_for_object_index(value_object_index), "Linker should have linked this correctly");
656
657 // Replace the string with interned string
658 heap_object = StringTable::intern(heap_object, CHECK_0);
659 set_heap_object_for_object_index(i, heap_object);
660
661 if (is_normal_interned_string) {
662 // Skip over the string value, already materialized
663 i++;
664 }
665 }
666
667 if (lazy_object_indices.is_empty()) {
    // Normal case; no lazily materialized objects sprinkled in the root subgraph
669 initialize_range(first_object_index, last_object_index, CHECK_0);
670 } else {
    // Lazy loading has already initialized some of the objects; we have to initialize
    // around them to make sure they are not mutated.
673 int previous_object_index = first_object_index - 1; // Exclusive start of initialization slice
674 for (int i = 0; i < lazy_object_indices.length(); ++i) {
675 int lazy_object_index = lazy_object_indices.at(i);
676 int slice_start_object_index = previous_object_index;
677 int slice_end_object_index = lazy_object_index;
678
679 if (slice_end_object_index - slice_start_object_index > 1) { // Both markers are exclusive
680 initialize_range(slice_start_object_index + 1, slice_end_object_index - 1, CHECK_0);
681 }
682 previous_object_index = lazy_object_index;
683 }
684 // Process tail range
685 if (last_object_index - previous_object_index > 0) {
686 initialize_range(previous_object_index + 1, last_object_index, CHECK_0);
687 }
688 }
689
690 return materialized_words;
691 }
692
693 bool AOTStreamedHeapLoader::IterativeObjectLoader::has_more() {
694 return _current_root_index < _num_roots;
695 }
696
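// Materialize the transitive closures of the next roots. The batch is grown from one
// root to as many roots as needed to cover at least min_batch_objects objects, so that
// small root subgraphs are processed together.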
697 void AOTStreamedHeapLoader::IterativeObjectLoader::materialize_next_batch(TRAPS) {
698 assert(has_more(), "only materialize if there is something to materialize");
699
700 int min_batch_objects = 128;
701 int from_root_index = _current_root_index;
702 int max_to_root_index = _num_roots - 1;
703 int until_root_index = from_root_index;
704 int highest_object_index;
705
  // Expand the batch from one root to N roots until it covers at least min_batch_objects objects in total
707 for (;;) {
708 highest_object_index = highest_object_index_for_root_index(until_root_index);
709 if (highest_object_index - _previous_batch_last_object_index >= min_batch_objects) {
710 break;
711 }
712 if (until_root_index == max_to_root_index) {
713 break;
714 }
715 until_root_index++;
716 }
717
718 oop root = nullptr;
719
  // Materialize objects if necessary, representing the transitive closures of the roots
721 if (highest_object_index > _previous_batch_last_object_index) {
722 while (_swapping_root_format) {
      // When the roots are being upgraded to use handles, it is not safe to racingly
      // iterate over the objects; we must wait. Setting the current batch last object index
      // to something other than the previous batch last object index indicates to the
      // root format swap that an iteration is currently in progress.
727 AOTHeapLoading_lock->wait();
728 }
729 int first_object_index = _previous_batch_last_object_index + 1;
730 _current_batch_last_object_index = highest_object_index;
731 size_t allocated_words;
732 {
733 MutexUnlocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);
734 allocated_words = materialize_range(first_object_index, highest_object_index, CHECK);
735 }
736 _allocated_words += allocated_words;
737 _previous_batch_last_object_index = _current_batch_last_object_index;
738 if (_waiting_for_iterator) {
      // If the tracer is waiting, let it know at the next point of unlocking that the
      // roots it waited for have been processed.
741 AOTHeapLoading_lock->notify_all();
742 }
743 }
744
  // Install the roots
746 for (int i = from_root_index; i <= until_root_index; ++i) {
747 int root_object_index = object_index_for_root_index(i);
748 root = heap_object_for_object_index(root_object_index);
749 install_root(i, root);
750 ++_current_root_index;
751 }
752 }
753
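// Materialize objects before GC is enabled, within a budget derived from how much memory
// the heap can hand out during bootstrapping. Returns true if all objects were
// materialized before GC was allowed.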
754 bool AOTStreamedHeapLoader::materialize_early(TRAPS) {
755 Ticks start = Ticks::now();
756
757 // Only help with early materialization from the AOT thread if the heap archive can be allocated
758 // without the need for a GC. Otherwise, do lazy loading until GC is enabled later in the bootstrapping.
759 size_t bootstrap_max_memory = Universe::heap()->bootstrap_max_memory();
760 size_t bootstrap_min_memory = MAX2(_heap_region_used, 2 * M);
761
762 size_t before_gc_materialize_budget_bytes = (bootstrap_max_memory > bootstrap_min_memory) ? bootstrap_max_memory - bootstrap_min_memory : 0;
763 size_t before_gc_materialize_budget_words = before_gc_materialize_budget_bytes / HeapWordSize;
764
765 log_info(aot, heap)("Max bootstrapping memory: %zuM, min bootstrapping memory: %zuM, selected budget: %zuM",
766 bootstrap_max_memory / M, bootstrap_min_memory / M, before_gc_materialize_budget_bytes / M);
767
768 while (IterativeObjectLoader::has_more()) {
769 if (_allow_gc || _allocated_words > before_gc_materialize_budget_words) {
770 log_info(aot, heap)("Early object materialization interrupted at root %d", _current_root_index);
771 break;
772 }
773
774 IterativeObjectLoader::materialize_next_batch(CHECK_false);
775 }
776
777 _early_materialization_time_ns = (Ticks::now() - start).nanoseconds();
778
779 bool finished_before_gc_allowed = !_allow_gc && !IterativeObjectLoader::has_more();
780
781 return finished_before_gc_allowed;
782 }
783
784 void AOTStreamedHeapLoader::materialize_late(TRAPS) {
785 Ticks start = Ticks::now();
786
787 // Continue materializing with GC allowed
788
789 while (IterativeObjectLoader::has_more()) {
790 IterativeObjectLoader::materialize_next_batch(CHECK);
791 }
792
793 _late_materialization_time_ns = (Ticks::now() - start).nanoseconds();
794 }
795
796 void AOTStreamedHeapLoader::cleanup() {
797 // First ensure there is no concurrent tracing going on
798 while (_waiting_for_iterator) {
799 AOTHeapLoading_lock->wait();
800 }
801
802 Ticks start = Ticks::now();
803
804 // Remove OopStorage roots
805 if (_objects_are_handles) {
806 size_t num_handles = _num_archived_objects;
807 // Skip the null entry
808 oop** handles = ((oop**)_object_index_to_heap_object_table) + 1;
809 // Sort the handles so that oop storage can release them faster
810 qsort(handles, num_handles, sizeof(oop*), (int (*)(const void*, const void*))oop_handle_cmp);
811 size_t num_null_handles = 0;
812 for (size_t handles_remaining = num_handles; handles_remaining != 0; --handles_remaining) {
813 oop* handle = handles[handles_remaining - 1];
814 if (handle == nullptr) {
815 num_null_handles = handles_remaining;
816 break;
817 }
818 NativeAccess<>::oop_store(handle, nullptr);
819 }
820 Universe::vm_global()->release(&handles[num_null_handles], num_handles - num_null_handles);
821 }
822
823 FREE_C_HEAP_ARRAY(void*, _object_index_to_heap_object_table);
824
825 // Unmap regions
826 FileMapInfo::current_info()->unmap_region(AOTMetaspace::hp);
827 FileMapInfo::current_info()->unmap_region(AOTMetaspace::bm);
828
829 _cleanup_materialization_time_ns = (Ticks::now() - start).nanoseconds();
830
831 log_statistics();
832 }
833
834 void AOTStreamedHeapLoader::log_statistics() {
835 uint64_t total_duration_us = (Ticks::now() - _materialization_start_ticks).microseconds();
836 const bool is_async = _loading_all_objects && !AOTEagerlyLoadObjects;
837 const char* const async_or_sync = is_async ? "async" : "sync";
838 log_info(aot, heap)("start to finish materialization time: " UINT64_FORMAT "us",
839 total_duration_us);
840 log_info(aot, heap)("early object materialization time (%s): " UINT64_FORMAT "us",
841 async_or_sync, _early_materialization_time_ns / 1000);
842 log_info(aot, heap)("late object materialization time (%s): " UINT64_FORMAT "us",
843 async_or_sync, _late_materialization_time_ns / 1000);
844 log_info(aot, heap)("object materialization cleanup time (%s): " UINT64_FORMAT "us",
845 async_or_sync, _cleanup_materialization_time_ns / 1000);
846 log_info(aot, heap)("final object materialization time stall (sync): " UINT64_FORMAT "us",
847 _final_materialization_time_ns / 1000);
848 log_info(aot, heap)("bootstrapping lazy materialization time (sync): " UINT64_FORMAT "us",
849 _accumulated_lazy_materialization_time_ns / 1000);
850
851 uint64_t sync_time = _final_materialization_time_ns + _accumulated_lazy_materialization_time_ns;
852 uint64_t async_time = _early_materialization_time_ns + _late_materialization_time_ns + _cleanup_materialization_time_ns;
853
854 if (!is_async) {
855 sync_time += async_time;
856 async_time = 0;
857 }
858
859 log_info(aot, heap)("sync materialization time: " UINT64_FORMAT "us",
860 sync_time / 1000);
861
862 log_info(aot, heap)("async materialization time: " UINT64_FORMAT "us",
863 async_time / 1000);
864
865 uint64_t iterative_time = (uint64_t)(is_async ? async_time : sync_time);
866 uint64_t materialized_bytes = _allocated_words * HeapWordSize;
867 log_info(aot, heap)("%s materialized " UINT64_FORMAT "K (" UINT64_FORMAT "M/s)", async_or_sync,
868 materialized_bytes / 1024, uint64_t(materialized_bytes * UCONST64(1'000'000'000) / M / iterative_time));
869 }
870
871 void AOTStreamedHeapLoader::materialize_objects() {
  // We cannot handle any exception when materializing roots; any exception exits the VM.
873 EXCEPTION_MARK
874
875 // Objects are laid out in DFS order; DFS traverse the roots by linearly walking all objects
876 HandleMark hm(THREAD);
877
878 // Early materialization with a budget before GC is allowed
879 MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);
880
881 materialize_early(CHECK);
882 await_gc_enabled();
883 materialize_late(CHECK);
884 // Notify materialization is done
885 AOTHeapLoading_lock->notify_all();
886 cleanup();
887 }
888
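// Replace a raw oop entry in the object table with an OopStorage handle so that the
// entry stays valid once GC is allowed to move objects.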
889 void AOTStreamedHeapLoader::switch_object_index_to_handle(int object_index) {
890 oop heap_object = cast_to_oop(_object_index_to_heap_object_table[object_index]);
891 if (heap_object == nullptr) {
892 return;
893 }
894
895 oop* handle = Universe::vm_global()->allocate();
896 NativeAccess<>::oop_store(handle, heap_object);
897 _object_index_to_heap_object_table[object_index] = handle;
898 }
899
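// Called when the VM is about to allow GC. Pauses tracing and iteration, upgrades the
// object table to OopStorage handles if materialization is still in progress, and then
// sets _allow_gc before resuming.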
900 void AOTStreamedHeapLoader::enable_gc() {
901 if (AOTEagerlyLoadObjects && !IterativeObjectLoader::has_more()) {
902 // Everything was loaded eagerly at early startup
903 return;
904 }
905
  // We cannot handle any exception when materializing roots; any exception exits the VM.
907 EXCEPTION_MARK
908
909 MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);
910
911 // First wait until no tracing is active
912 while (_waiting_for_iterator) {
913 AOTHeapLoading_lock->wait();
914 }
915
916 // Lock further tracing from starting
917 _waiting_for_iterator = true;
918
  // The entire object table may need to be upgraded to handles
920 int num_handles = (int)_num_archived_objects;
921
922 // Lock further iteration from starting
923 _swapping_root_format = true;
924
925 // Then wait for the iterator to stop
926 while (_previous_batch_last_object_index != _current_batch_last_object_index) {
927 AOTHeapLoading_lock->wait();
928 }
929
930 if (IterativeObjectLoader::has_more()) {
    // If there is more to be materialized, we have to upgrade the object index
    // to heap object mapping to use handles. If there isn't more to materialize, the
    // handles will no longer be used; they are only used to materialize objects.
934
935 for (int i = 1; i <= num_handles; ++i) {
      // Upgrade each object table entry to use a handle
937 switch_object_index_to_handle(i);
938 }
939
940 // From now on, accessing the object table must be done through a handle.
941 _objects_are_handles = true;
942 }
943
944 // Unlock tracing
945 _waiting_for_iterator = false;
946
947 // Unlock iteration
948 _swapping_root_format = false;
949
950 _allow_gc = true;
951
952 AOTHeapLoading_lock->notify_all();
953
954 if (AOTEagerlyLoadObjects && IterativeObjectLoader::has_more()) {
955 materialize_late(CHECK);
956 cleanup();
957 }
958 }
959
960 void AOTStreamedHeapLoader::materialize_thread_object() {
961 AOTThread::materialize_thread_object();
962 }
963
964 void AOTStreamedHeapLoader::finish_materialize_objects() {
965 Ticks start = Ticks::now();
966
967 if (_loading_all_objects) {
968 MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);
969 // Wait for the AOT thread to finish
970 while (IterativeObjectLoader::has_more()) {
971 AOTHeapLoading_lock->wait();
972 }
973 } else {
974 assert(!AOTEagerlyLoadObjects, "sanity");
975 assert(_current_root_index == 0, "sanity");
976 // Without the full module graph we have done only lazy tracing materialization.
977 // Ensure all roots are processed here by triggering root loading on every root.
978 for (int i = 0; i < _num_roots; ++i) {
979 get_root(i);
980 }
981 cleanup();
982 }
983
984 _final_materialization_time_ns = (Ticks::now() - start).nanoseconds();
985 }
986
987 void account_lazy_materialization_time_ns(uint64_t time, const char* description, int index) {
988 AtomicAccess::add(&_accumulated_lazy_materialization_time_ns, time);
989 log_debug(aot, heap)("Lazy materialization of %s: %d end (" UINT64_FORMAT " us of " UINT64_FORMAT " us)", description, index, time / 1000, _accumulated_lazy_materialization_time_ns / 1000);
990 }
991
992 // Initialize an empty array of AOT heap roots; materialize them lazily
993 void AOTStreamedHeapLoader::initialize() {
994 EXCEPTION_MARK
995
996 _materialization_start_ticks = Ticks::now();
997
998 FileMapInfo::current_info()->map_bitmap_region();
999
1000 _heap_region = FileMapInfo::current_info()->region_at(AOTMetaspace::hp);
1001 _bitmap_region = FileMapInfo::current_info()->region_at(AOTMetaspace::bm);
1002
1003 assert(_heap_region->used() > 0, "empty heap archive?");
1004
1005 _is_in_use = true;
1006
1007 // archived roots are at this offset in the stream.
1008 size_t roots_offset = FileMapInfo::current_info()->streamed_heap()->roots_offset();
1009 size_t forwarding_offset = FileMapInfo::current_info()->streamed_heap()->forwarding_offset();
1010 size_t root_highest_object_index_table_offset = FileMapInfo::current_info()->streamed_heap()->root_highest_object_index_table_offset();
1011 _num_archived_objects = FileMapInfo::current_info()->streamed_heap()->num_archived_objects();
1012
1013 // The first int is the length of the array
1014 _roots_archive = ((int*)(((address)_heap_region->mapped_base()) + roots_offset)) + 1;
1015 _num_roots = _roots_archive[-1];
1016 _heap_region_used = _heap_region->used();
1017
1018 // We can't retire a TLAB until the filler klass is set; set it to the archived object klass.
1019 CollectedHeap::set_filler_object_klass(vmClasses::Object_klass());
1020
1021 objArrayOop roots = oopFactory::new_objectArray(_num_roots, CHECK);
1022 _roots = OopHandle(Universe::vm_global(), roots);
1023
1024 _object_index_to_buffer_offset_table = (size_t*)(((address)_heap_region->mapped_base()) + forwarding_offset);
1025 // We allocate the first entry for "null"
1026 _object_index_to_heap_object_table = NEW_C_HEAP_ARRAY(void*, _num_archived_objects + 1, mtClassShared);
1027 Copy::zero_to_bytes(_object_index_to_heap_object_table, (_num_archived_objects + 1) * sizeof(void*));
1028
1029 _root_highest_object_index_table = (int*)(((address)_heap_region->mapped_base()) + root_highest_object_index_table_offset);
1030
1031 address start = (address)(_bitmap_region->mapped_base()) + _heap_region->oopmap_offset();
1032 _oopmap = BitMapView((BitMap::bm_word_t*)start, _heap_region->oopmap_size_in_bits());
1033
1034
1035 if (FLAG_IS_DEFAULT(AOTEagerlyLoadObjects)) {
1036 // Concurrency will not help much if there are no extra cores available.
1037 FLAG_SET_ERGO(AOTEagerlyLoadObjects, os::initial_active_processor_count() <= 1);
1038 }
1039
1040 // If the full module graph is not available or the JVMTI class file load hook is on, we
1041 // will prune the object graph to not include cached objects in subgraphs that are not intended
1042 // to be loaded.
1043 _loading_all_objects = CDSConfig::is_using_full_module_graph() && !JvmtiExport::should_post_class_file_load_hook();
1044 if (!_loading_all_objects) {
1045 // When not using FMG, fall back to tracing materialization
1046 FLAG_SET_ERGO(AOTEagerlyLoadObjects, false);
1047 return;
1048 }
1049
1050 if (AOTEagerlyLoadObjects) {
1051 // Objects are laid out in DFS order; DFS traverse the roots by linearly walking all objects
1052 HandleMark hm(THREAD);
1053
1054 // Early materialization with a budget before GC is allowed
1055 MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);
1056
1057 bool finished_before_gc_allowed = materialize_early(CHECK);
1058 if (finished_before_gc_allowed) {
1059 cleanup();
1060 }
1061 } else {
1062 AOTThread::initialize();
1063 }
1064 }
1065
1066 oop AOTStreamedHeapLoader::materialize_root(int root_index) {
1067 Ticks start = Ticks::now();
  // We cannot handle any exception when materializing a root; any exception exits the VM.
1069 EXCEPTION_MARK
1070 Stack<AOTHeapTraversalEntry, mtClassShared> dfs_stack;
1071 HandleMark hm(THREAD);
1072
1073 oop result;
1074 {
1075 MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);
1076
1077 oop root = objArrayOop(_roots.resolve())->obj_at(root_index);
1078
1079 if (root != nullptr) {
1080 // The root has already been materialized
1081 result = root;
1082 } else {
1083 // The root has not been materialized, start tracing materialization
1084 result = TracingObjectLoader::materialize_root(root_index, dfs_stack, CHECK_NULL);
1085 }
1086 }
1087
1088 uint64_t duration = (Ticks::now() - start).nanoseconds();
1089
1090 account_lazy_materialization_time_ns(duration, "root", root_index);
1091
1092 return result;
1093 }
1094
1095 oop AOTStreamedHeapLoader::get_root(int index) {
1096 oop result = objArrayOop(_roots.resolve())->obj_at(index);
1097 if (result == nullptr) {
1098 // Materialize root
1099 result = materialize_root(index);
1100 }
1101 if (result == _roots.resolve()) {
1102 // A self-reference to the roots array acts as a sentinel object for null,
1103 // indicating that the root has been cleared.
1104 result = nullptr;
1105 }
1106 // Acquire the root transitive object payload
1107 OrderAccess::acquire();
1108 return result;
1109 }
1110
1111 void AOTStreamedHeapLoader::clear_root(int index) {
1112 // Self-reference to the roots array acts as a sentinel object for null,
1113 // indicating that the root has been cleared.
1114 objArrayOop(_roots.resolve())->obj_at_put(index, _roots.resolve());
1115 }
1116
1117 void AOTStreamedHeapLoader::await_gc_enabled() {
1118 while (!_allow_gc) {
1119 AOTHeapLoading_lock->wait();
1120 }
1121 }
1122
1123 void AOTStreamedHeapLoader::finish_initialization(FileMapInfo* static_mapinfo) {
1124 static_mapinfo->stream_heap_region();
1125 }
1126
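// Build an AOTMapLogger iterator over the archived objects. References in the archive
// are still encoded as object indices, so obj_at() decodes an index instead of reading
// a real oop or narrowOop.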
1127 AOTMapLogger::OopDataIterator* AOTStreamedHeapLoader::oop_iterator(FileMapInfo* info, address buffer_start, address buffer_end) {
1128 class StreamedLoaderOopIterator : public AOTMapLogger::OopDataIterator {
1129 private:
1130 int _current;
1131 int _next;
1132
1133 address _buffer_start;
1134
1135 int _num_archived_objects;
1136
1137 public:
1138 StreamedLoaderOopIterator(address buffer_start,
1139 int num_archived_objects)
1140 : _current(0),
1141 _next(1),
1142 _buffer_start(buffer_start),
1143 _num_archived_objects(num_archived_objects) {
1144 }
1145
1146 AOTMapLogger::OopData capture(int dfs_index) {
1147 size_t buffered_offset = buffer_offset_for_object_index(dfs_index);
1148 address buffered_addr = _buffer_start + buffered_offset;
1149 oopDesc* raw_oop = (oopDesc*)buffered_addr;
1150 size_t size = archive_object_size(raw_oop);
1151
1152 intptr_t target_location = (intptr_t)buffered_offset;
1153 uint32_t narrow_location = checked_cast<uint32_t>(dfs_index);
1154 Klass* klass = raw_oop->klass();
1155
1156 address requested_addr = (address)buffered_offset;
1157
1158 return { buffered_addr,
1159 requested_addr,
1160 target_location,
1161 narrow_location,
1162 raw_oop,
1163 klass,
1164 size,
1165 false };
1166 }
1167
1168 bool has_next() override {
1169 return _next <= _num_archived_objects;
1170 }
1171
1172 AOTMapLogger::OopData next() override {
1173 _current = _next;
1174 AOTMapLogger::OopData result = capture(_current);
1175 _next = _current + 1;
1176 return result;
1177 }
1178
1179 AOTMapLogger::OopData obj_at(narrowOop* addr) override {
1180 int dfs_index = (int)(*addr);
1181 if (dfs_index == 0) {
1182 return null_data();
1183 } else {
1184 return capture(dfs_index);
1185 }
1186 }
1187
1188 AOTMapLogger::OopData obj_at(oop* addr) override {
1189 int dfs_index = (int)cast_from_oop<uintptr_t>(*addr);
1190 if (dfs_index == 0) {
1191 return null_data();
1192 } else {
1193 return capture(dfs_index);
1194 }
1195 }
1196
1197 GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
1198 GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* result = new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();
1199
1200 for (int i = 0; i < _num_roots; ++i) {
1201 int object_index = object_index_for_root_index(i);
1202 result->append(capture(object_index));
1203 }
1204
1205 return result;
1206 }
1207 };
1208
1209 assert(_is_in_use, "printing before initializing?");
1210
1211 return new StreamedLoaderOopIterator(buffer_start, (int)info->streamed_heap()->num_archived_objects());
1212 }
1213
1214 #endif // INCLUDE_CDS_JAVA_HEAP