/*
 * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotMetaspace.hpp"
#include "cds/aotStreamedHeapLoader.hpp"
#include "cds/aotThread.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/oopStorage.inline.hpp"
#include "gc/shared/oopStorageSet.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "oops/access.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/thread.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/stack.inline.hpp"
#include "utilities/ticks.hpp"

#include <type_traits>

#if INCLUDE_CDS_JAVA_HEAP

FileMapRegion* AOTStreamedHeapLoader::_heap_region;
FileMapRegion* AOTStreamedHeapLoader::_bitmap_region;
int* AOTStreamedHeapLoader::_roots_archive;
OopHandle AOTStreamedHeapLoader::_roots;
BitMapView AOTStreamedHeapLoader::_oopmap;
bool AOTStreamedHeapLoader::_is_in_use;
int AOTStreamedHeapLoader::_previous_batch_last_object_index;
int AOTStreamedHeapLoader::_current_batch_last_object_index;
int AOTStreamedHeapLoader::_current_root_index;
size_t AOTStreamedHeapLoader::_allocated_words;
bool AOTStreamedHeapLoader::_allow_gc;
bool AOTStreamedHeapLoader::_objects_are_handles;
size_t AOTStreamedHeapLoader::_num_archived_objects;
int AOTStreamedHeapLoader::_num_roots;
size_t AOTStreamedHeapLoader::_heap_region_used;
bool AOTStreamedHeapLoader::_loading_all_objects;

size_t* AOTStreamedHeapLoader::_object_index_to_buffer_offset_table;
void** AOTStreamedHeapLoader::_object_index_to_heap_object_table;
int* AOTStreamedHeapLoader::_root_highest_object_index_table;

bool AOTStreamedHeapLoader::_waiting_for_iterator;
bool AOTStreamedHeapLoader::_swapping_root_format;

static uint64_t _early_materialization_time_ns = 0;
static uint64_t _late_materialization_time_ns = 0;
static uint64_t _final_materialization_time_ns = 0;
static uint64_t _cleanup_materialization_time_ns = 0;
static volatile uint64_t _accumulated_lazy_materialization_time_ns = 0;
static Ticks _materialization_start_ticks;

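// The helpers below translate between the identifiers used by the streamed
// heap: root indices (entries in the archived root array), object indices
// (positions in the DFS layout order of the archive), buffer offsets into
// the mapped heap region, and the materialized heap objects themselves.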
int AOTStreamedHeapLoader::object_index_for_root_index(int root_index) {
  return _roots_archive[root_index];
}

int AOTStreamedHeapLoader::highest_object_index_for_root_index(int root_index) {
  return _root_highest_object_index_table[root_index];
}

size_t AOTStreamedHeapLoader::buffer_offset_for_object_index(int object_index) {
  return _object_index_to_buffer_offset_table[object_index];
}

oopDesc* AOTStreamedHeapLoader::archive_object_for_object_index(int object_index) {
  size_t buffer_offset = buffer_offset_for_object_index(object_index);
  address bottom = (address)_heap_region->mapped_base();
  return (oopDesc*)(bottom + buffer_offset);
}

size_t AOTStreamedHeapLoader::buffer_offset_for_archive_object(oopDesc* archive_object) {
  address bottom = (address)_heap_region->mapped_base();
  return size_t(archive_object) - size_t(bottom);
}

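// The oopmap has one bit per potential reference location in the heap region:
// one bit per narrowOop slot with compressed oops, otherwise one per HeapWord.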
template <bool use_coops>
BitMap::idx_t AOTStreamedHeapLoader::obj_bit_idx_for_buffer_offset(size_t buffer_offset) {
  if constexpr (use_coops) {
    return BitMap::idx_t(buffer_offset / sizeof(narrowOop));
  } else {
    return BitMap::idx_t(buffer_offset / sizeof(HeapWord));
  }
}

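// Returns the materialized heap object for an object index, or nullptr if it
// has not been materialized yet. Once GC is enabled, the table entries are
// OopStorage handles rather than raw oops.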
oop AOTStreamedHeapLoader::heap_object_for_object_index(int object_index) {
  assert(object_index >= 0 && object_index <= (int)_num_archived_objects,
         "Heap object index out of bounds: %d", object_index);

  if (_objects_are_handles) {
    oop* handle = (oop*)_object_index_to_heap_object_table[object_index];
    if (handle == nullptr) {
      return nullptr;
    }
    return NativeAccess<>::oop_load(handle);
  } else {
    return cast_to_oop(_object_index_to_heap_object_table[object_index]);
  }
}

void AOTStreamedHeapLoader::set_heap_object_for_object_index(int object_index, oop heap_object) {
  assert(heap_object_for_object_index(object_index) == nullptr, "Should only set once with this API");
  if (_objects_are_handles) {
    oop* handle = Universe::vm_global()->allocate();
    NativeAccess<>::oop_store(handle, heap_object);
    _object_index_to_heap_object_table[object_index] = (void*)handle;
  } else {
    _object_index_to_heap_object_table[object_index] = cast_from_oop<void*>(heap_object);
  }
}

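// Reference fields of archived objects hold object indices rather than real
// pointers, so reading the raw String.value field of an archived string
// yields the object index of its value array.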
int AOTStreamedHeapLoader::archived_string_value_object_index(oopDesc* archive_object) {
  assert(archive_object->klass() == vmClasses::String_klass(), "Must be an archived string");
  address archive_string_value_addr = (address)archive_object + java_lang_String::value_offset();
  return UseCompressedOops ? *(int*)archive_string_value_addr : (int)*(int64_t*)archive_string_value_addr;
}

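// The archived object is not a real oop; read the array length directly from
// its raw bytes.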
static int archive_array_length(oopDesc* archive_array) {
  return *(int*)(address(archive_array) + arrayOopDesc::length_offset_in_bytes());
}

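// Computes the word size of an archived object from its klass layout helper.
// Objects whose size cannot be derived from the layout helper have their size
// stored in the word immediately preceding the object.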
static size_t archive_object_size(oopDesc* archive_object) {
  Klass* klass = archive_object->klass();
  int lh = klass->layout_helper();

  if (Klass::layout_helper_is_instance(lh)) {
    // Instance
    if (Klass::layout_helper_needs_slow_path(lh)) {
      return ((size_t*)(archive_object))[-1];
    } else {
      size_t size = (size_t)Klass::layout_helper_size_in_bytes(lh) >> LogHeapWordSize;
      if (UseCompactObjectHeaders && archive_object->mark().is_expanded() && klass->expand_for_hash(archive_object, archive_object->mark())) {
        size = align_object_size(size + 1);
      }
      return size;
    }
  } else if (Klass::layout_helper_is_array(lh)) {
    // Array
    size_t size_in_bytes;
    size_t array_length = (size_t)archive_array_length(archive_object);
    size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
    size_in_bytes += (size_t)Klass::layout_helper_header_size(lh);

    size_t size = align_up(size_in_bytes, (size_t)MinObjAlignmentInBytes) / HeapWordSize;
    if (UseCompactObjectHeaders && archive_object->mark().is_expanded() && klass->expand_for_hash(archive_object, archive_object->mark())) {
      size = align_object_size(size + 1);
    }
    return size;
  } else {
    // Other
    return ((size_t*)(archive_object))[-1];
  }
}

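// Allocates the runtime counterpart of an archived object and installs its
// markWord. JVMTI allocation events are suppressed for these allocations.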
oop AOTStreamedHeapLoader::allocate_object(oopDesc* archive_object, markWord mark, size_t size, TRAPS) {
  assert(!archive_object->is_stackChunk(), "no such objects are archived");

  NoJvmtiEventsMark njem;
  oop heap_object;

  Klass* klass = archive_object->klass();
  assert(!(UseCompactObjectHeaders && mark.is_hashed_not_expanded()), "Must not be hashed/not-expanded");
  if (klass->is_mirror_instance_klass()) {
    size_t base_size = size;
    assert(!(UseCompactObjectHeaders && mark.is_not_hashed_expanded()), "should not happen");
    heap_object = Universe::heap()->class_allocate(klass, size, base_size, CHECK_NULL);
  } else if (klass->is_instance_klass()) {
    heap_object = Universe::heap()->obj_allocate(klass, size, CHECK_NULL);
  } else {
    assert(klass->is_array_klass(), "must be");
    int length = archive_array_length(archive_object);
    bool do_zero = klass->is_objArray_klass();
    heap_object = Universe::heap()->array_allocate(klass, size, length, do_zero, CHECK_NULL);
  }

  heap_object->set_mark(mark);

  return heap_object;
}

void AOTStreamedHeapLoader::install_root(int root_index, oop heap_object) {
  objArrayOop roots = objArrayOop(_roots.resolve());
  OrderAccess::release(); // Once the store below publishes an object, it can be concurrently picked up by another thread without using the lock
  roots->obj_at_put(root_index, heap_object);
}

void AOTStreamedHeapLoader::TracingObjectLoader::wait_for_iterator() {
  if (JavaThread::current()->is_active_Java_thread()) {
    // When the main thread has bootstrapped past the point of allowing safepoints,
    // we can, and indeed must, wait with safepoint checking.
    AOTHeapLoading_lock->wait();
  } else {
    // If the main thread has not bootstrapped far enough, then we cannot, and
    // indeed also don't need to, wait with safepoint checking.
    AOTHeapLoading_lock->wait_without_safepoint_check();
  }
}

// Link object after copying in-place
template <typename LinkerT>
class AOTStreamedHeapLoader::InPlaceLinkingOopClosure : public BasicOopIterateClosure {
private:
  oop _obj;
  LinkerT _linker;

public:
  InPlaceLinkingOopClosure(oop obj, LinkerT linker)
    : _obj(obj),
      _linker(linker) {
  }

  virtual void do_oop(oop* p) { do_oop_work(p, (int)*(intptr_t*)p); }
  virtual void do_oop(narrowOop* p) { do_oop_work(p, *(int*)p); }

  template <typename T>
  void do_oop_work(T* p, int object_index) {
    int p_offset = pointer_delta_as_int((address)p, cast_from_oop<address>(_obj));
    oop pointee = _linker(p_offset, object_index);
    if (pointee != nullptr) {
      _obj->obj_field_put_access<IS_DEST_UNINITIALIZED>((int)p_offset, pointee);
    }
  }
};

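// Copies the payload of an archived object into its heap counterpart,
// memcpy-ing runs of primitive data between reference locations (found via
// the oopmap) and linking each reference individually through the supplied
// linker.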
template <bool use_coops, typename LinkerT>
void AOTStreamedHeapLoader::copy_payload_carefully(oopDesc* archive_object,
                                                   oop heap_object,
                                                   BitMap::idx_t header_bit,
                                                   BitMap::idx_t start_bit,
                                                   BitMap::idx_t end_bit,
                                                   LinkerT linker) {
  using RawElementT = std::conditional_t<use_coops, int32_t, int64_t>;
  using OopElementT = std::conditional_t<use_coops, narrowOop, oop>;

  BitMap::idx_t unfinished_bit = start_bit;
  BitMap::idx_t next_reference_bit = _oopmap.find_first_set_bit(unfinished_bit, end_bit);

  // Fill in heap object bytes
  while (unfinished_bit < end_bit) {
    assert(unfinished_bit >= start_bit && unfinished_bit < end_bit, "out of bounds copying");

    // This is the address of the pointee inside the input stream
    size_t payload_offset = unfinished_bit - header_bit;
    RawElementT* archive_payload_addr = ((RawElementT*)archive_object) + payload_offset;
    RawElementT* heap_payload_addr = cast_from_oop<RawElementT*>(heap_object) + payload_offset;

    assert(heap_payload_addr >= cast_from_oop<RawElementT*>(heap_object) &&
           (HeapWord*)heap_payload_addr < cast_from_oop<HeapWord*>(heap_object) + heap_object->size(),
           "Out of bounds copying");

    if (next_reference_bit > unfinished_bit) {
      // Primitive bytes available
      size_t primitive_elements = next_reference_bit - unfinished_bit;
      size_t primitive_bytes = primitive_elements * sizeof(RawElementT);
      ::memcpy(heap_payload_addr, archive_payload_addr, primitive_bytes);

      unfinished_bit = next_reference_bit;
    } else {
      // Encountered reference
      RawElementT* archive_p = (RawElementT*)archive_payload_addr;
      OopElementT* heap_p = (OopElementT*)heap_payload_addr;
      int pointee_object_index = (int)*archive_p;
      int heap_p_offset = pointer_delta_as_int((address)heap_p, cast_from_oop<address>(heap_object));

      // The object index is retrieved from the archive, not the heap object. This is
      // important after GC is enabled. Concurrent GC threads may scan references in the
      // heap for various reasons after this point. Therefore, it is not okay to first copy
      // the object index from a reference location in the archived object payload to a
      // corresponding location in the heap object payload, and then fix it up afterwards to
      // refer to a heap object. This is why this code iterates carefully over object references
      // in the archived object, linking them one by one, without clobbering the reference
      // locations in the heap objects with anything other than transitions from null to the
      // intended linked object.
      oop obj = linker(heap_p_offset, pointee_object_index);
      if (obj != nullptr) {
        heap_object->obj_field_put(heap_p_offset, obj);
      }

      unfinished_bit++;
      next_reference_bit = _oopmap.find_first_set_bit(unfinished_bit, end_bit);
    }
  }
}

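// Copies and links one archived object into its heap counterpart. Before GC
// is enabled the payload is block-copied and fixed up in place; afterwards it
// is copied carefully so that reference and metadata fields only ever
// transition from zero to their final linked values.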
template <bool use_coops, typename LinkerT>
void AOTStreamedHeapLoader::copy_object_impl(oopDesc* archive_object,
                                             oop heap_object,
                                             size_t size,
                                             LinkerT linker) {
  if (!_allow_gc) {
    // Without concurrent GC running, we can copy incorrect object references
    // and metadata references into the heap object and then fix them up in-place.
    size_t offset = 1;
    size_t payload_size = size - offset;
    HeapWord* archive_start = ((HeapWord*)archive_object);
    HeapWord* heap_start = cast_from_oop<HeapWord*>(heap_object);

    Copy::disjoint_words(archive_start + offset, heap_start + offset, payload_size);

    if (UseCompactObjectHeaders) {
      // The copy above skipped the first 4 bytes of payload/arraylength; copy them as well.
      *(reinterpret_cast<jint*>(heap_start) + 1) = *(reinterpret_cast<jint*>(archive_start) + 1);
    }

    // In-place linking fixes up object indices from references of the heap object,
    // and patches them up to refer to objects. This can be done because we just copied
    // the payload of the object from the archive to the heap object, including the
    // reference object indices. However, this is only okay to do before the GC can run.
    // A concurrent GC thread might racingly read the object payload after GC is enabled.
    InPlaceLinkingOopClosure cl(heap_object, linker);
    heap_object->oop_iterate(&cl);
    HeapShared::remap_loaded_metadata(heap_object);
    return;
  }

  // When a concurrent GC may be running, we take care not to copy incorrect oops,
  // narrowOops or Metadata* into the heap objects. Transitions go from 0 to the
  // intended runtime linked values only.
  size_t word_scale = use_coops ? 2 : 1;
  using RawElementT = std::conditional_t<use_coops, int32_t, int64_t>;

  // Skip the markWord; it is set at allocation time
  size_t header_size = (UseCompactObjectHeaders && use_coops) ? 1 : word_scale;

  size_t buffer_offset = buffer_offset_for_archive_object(archive_object);
  const BitMap::idx_t header_bit = obj_bit_idx_for_buffer_offset<use_coops>(buffer_offset);
  const BitMap::idx_t start_bit = header_bit + header_size;
  const BitMap::idx_t end_bit = header_bit + size * word_scale;

  BitMap::idx_t curr_bit = start_bit;

  if (UseCompactObjectHeaders && !use_coops) {
    // Copy first 4 primitive bytes.
    jint* archive_start = reinterpret_cast<jint*>(archive_object);
    HeapWord* heap_start = cast_from_oop<HeapWord*>(heap_object);
    *(reinterpret_cast<jint*>(heap_start) + 1) = *(archive_start + 1);
  }

  // We are a bit paranoid about GC or other safepointing operations observing
  // shady metadata fields from the archive that do not point at real metadata.
  // We deal with this by explicitly reading the requested address from the
  // archive and fixing it to real Metadata before writing it into the heap object.
  HeapShared::do_metadata_offsets(heap_object, [&](int metadata_offset) {
    BitMap::idx_t metadata_field_idx = header_bit + (size_t)metadata_offset / sizeof(RawElementT);
    BitMap::idx_t skip = word_scale;
    assert(metadata_field_idx >= start_bit && metadata_field_idx + skip <= end_bit,
           "Metadata field out of bounds");

    // Copy payload before metadata field
    copy_payload_carefully<use_coops>(archive_object,
                                      heap_object,
                                      header_bit,
                                      curr_bit,
                                      metadata_field_idx,
                                      linker);

    // Copy metadata field
    Metadata* const archive_metadata = *(Metadata**)(uintptr_t(archive_object) + (size_t)metadata_offset);
    Metadata* const runtime_metadata = archive_metadata != nullptr
        ? (Metadata*)(address(archive_metadata) + AOTMetaspace::relocation_delta())
        : nullptr;
    assert(runtime_metadata == nullptr || AOTMetaspace::in_aot_cache(runtime_metadata), "Invalid metadata pointer");
    DEBUG_ONLY(Metadata* const previous_metadata = heap_object->metadata_field(metadata_offset);)
    assert(previous_metadata == nullptr || previous_metadata == runtime_metadata, "Should not observe transient values");
    heap_object->metadata_field_put(metadata_offset, runtime_metadata);
    curr_bit = metadata_field_idx + skip;
  });

  // Copy the trailing payload after the last metadata field. This usually
  // does most of the copying.
  copy_payload_carefully<use_coops>(archive_object,
                                    heap_object,
                                    header_bit,
                                    curr_bit,
                                    end_bit,
                                    linker);
}

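// Eager linking: every pointee has already been materialized, so references
// can be resolved immediately through the object table.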
void AOTStreamedHeapLoader::copy_object_eager_linking(oopDesc* archive_object, oop heap_object, size_t size) {
  auto linker = [&](int p_offset, int pointee_object_index) {
    oop obj = AOTStreamedHeapLoader::heap_object_for_object_index(pointee_object_index);
    assert(pointee_object_index == 0 || obj != nullptr, "Eager object loading should only encounter already allocated links");
    return obj;
  };
  if (UseCompressedOops) {
    copy_object_impl<true>(archive_object, heap_object, size, linker);
  } else {
    copy_object_impl<false>(archive_object, heap_object, size, linker);
  }
}

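// Lazy linking: instead of resolving references immediately, push them onto
// the DFS stack so the traversal can materialize and link the pointees later.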
void AOTStreamedHeapLoader::TracingObjectLoader::copy_object_lazy_linking(int object_index,
                                                                          oopDesc* archive_object,
                                                                          oop heap_object,
                                                                          size_t size,
                                                                          Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack) {
  auto linker = [&](int p_offset, int pointee_object_index) {
    dfs_stack.push({pointee_object_index, object_index, p_offset});

    // The tracing linker defers linking; the reference fields are filled in
    // later when the traversal drains the stack. Returning null means don't link now.
    return oop(nullptr);
  };
  if (UseCompressedOops) {
    copy_object_impl<true>(archive_object, heap_object, size, linker);
  } else {
    copy_object_impl<false>(archive_object, heap_object, size, linker);
  }
}

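// Allocates and copies a single archived object. Interned strings are
// special-cased: the value array is materialized first, the string is linked
// eagerly, and the result is replaced with the canonical interned string.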
oop AOTStreamedHeapLoader::TracingObjectLoader::materialize_object_inner(int object_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
  // Allocate object
  oopDesc* archive_object = archive_object_for_object_index(object_index);
  size_t size = archive_object_size(archive_object);
  markWord mark = archive_object->mark();

  // The markWord is marked if the object is a String that should be interned;
  // make sure to unmark it before allocating memory for the object.
  bool string_intern = mark.is_marked();
  mark = mark.set_unmarked();

  oop heap_object;

  if (string_intern) {
    int value_object_index = archived_string_value_object_index(archive_object);

    // Materialize the value object.
    (void)materialize_object(value_object_index, dfs_stack, CHECK_NULL);

    // Allocate and link the string.
    heap_object = allocate_object(archive_object, mark, size, CHECK_NULL);
    copy_object_eager_linking(archive_object, heap_object, size);

    assert(java_lang_String::value(heap_object) == heap_object_for_object_index(value_object_index), "Linker should have linked this correctly");

    // Replace the string with interned string
    heap_object = StringTable::intern(heap_object, CHECK_NULL);
  } else {
    heap_object = allocate_object(archive_object, mark, size, CHECK_NULL);

    // Fill in object contents
    copy_object_lazy_linking(object_index, archive_object, heap_object, size, dfs_stack);
  }

  // Install forwarding
  set_heap_object_for_object_index(object_index, heap_object);

  return heap_object;
}

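// Materializes an object unless it is already covered by the iterative
// loader's completed or in-flight batches; if the AOT thread is currently
// materializing it, wait for that batch to complete instead.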
oop AOTStreamedHeapLoader::TracingObjectLoader::materialize_object(int object_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
  if (object_index <= _previous_batch_last_object_index) {
    // The transitive closure of this object has been materialized; no need to do anything
    return heap_object_for_object_index(object_index);
  }

  if (object_index <= _current_batch_last_object_index) {
    // The AOTThread is currently materializing this object and its transitive closure; only need to wait for it to complete
    _waiting_for_iterator = true;
    while (object_index > _previous_batch_last_object_index) {
      wait_for_iterator();
    }
    _waiting_for_iterator = false;

    // Notify the AOT thread if it is waiting for tracing to finish
    AOTHeapLoading_lock->notify_all();
    return heap_object_for_object_index(object_index);
  }

  oop heap_object = heap_object_for_object_index(object_index);
  if (heap_object != nullptr) {
    // Already materialized by mutator
    return heap_object;
  }

  return materialize_object_inner(object_index, dfs_stack, THREAD);
}

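// Drains the DFS stack: materializes each pending pointee and stores it into
// the reference field of its base object.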
void AOTStreamedHeapLoader::TracingObjectLoader::drain_stack(Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
  while (!dfs_stack.is_empty()) {
    AOTHeapTraversalEntry entry = dfs_stack.pop();
    int pointee_object_index = entry._pointee_object_index;
    oop pointee_heap_object = materialize_object(pointee_object_index, dfs_stack, CHECK);
    oop heap_object = heap_object_for_object_index(entry._base_object_index);
    if (_allow_gc) {
      heap_object->obj_field_put(entry._heap_field_offset_bytes, pointee_heap_object);
    } else {
      heap_object->obj_field_put_access<IS_DEST_UNINITIALIZED>(entry._heap_field_offset_bytes, pointee_heap_object);
    }
  }
}

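// Materializes an object and its entire transitive closure. The Handle keeps
// the result alive while draining the stack, which may safepoint.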
oop AOTStreamedHeapLoader::TracingObjectLoader::materialize_object_transitive(int object_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
  assert_locked_or_safepoint(AOTHeapLoading_lock);
  while (_waiting_for_iterator) {
    wait_for_iterator();
  }

  auto handlized_materialize_object = [&](TRAPS) {
    oop obj = materialize_object(object_index, dfs_stack, CHECK_(Handle()));
    return Handle(THREAD, obj);
  };

  Handle result = handlized_materialize_object(CHECK_NULL);
  drain_stack(dfs_stack, CHECK_NULL);

  return result();
}

oop AOTStreamedHeapLoader::TracingObjectLoader::materialize_root(int root_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS) {
  int root_object_index = object_index_for_root_index(root_index);
  oop root = materialize_object_transitive(root_object_index, dfs_stack, CHECK_NULL);
  install_root(root_index, root);

  return root;
}

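// qsort comparator ordering OopStorage handles by address so that they can be
// released in larger consecutive chunks.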
int oop_handle_cmp(const void* left, const void* right) {
  oop* left_handle = *(oop**)left;
  oop* right_handle = *(oop**)right;

  if (right_handle > left_handle) {
    return -1;
  } else if (left_handle > right_handle) {
    return 1;
  }

  return 0;
}

// The range is inclusive
void AOTStreamedHeapLoader::IterativeObjectLoader::initialize_range(int first_object_index, int last_object_index, TRAPS) {
  for (int i = first_object_index; i <= last_object_index; ++i) {
    oopDesc* archive_object = archive_object_for_object_index(i);
    markWord mark = archive_object->mark();
    bool string_intern = mark.is_marked();
    if (string_intern) {
      int value_object_index = archived_string_value_object_index(archive_object);
      if (value_object_index == i + 1) {
        // Interned strings are eagerly materialized in the allocation phase, so there is
        // nothing left to do here for either the string or its value array.
        i++;
      }
      continue;
    }
    size_t size = archive_object_size(archive_object);
    oop heap_object = heap_object_for_object_index(i);
    copy_object_eager_linking(archive_object, heap_object, size);
  }
}

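// Materializes an inclusive range of objects in two phases: first allocate
// every object (eagerly materializing interned strings), then initialize the
// range while skipping any objects that lazy loading already initialized.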
// The range is inclusive
size_t AOTStreamedHeapLoader::IterativeObjectLoader::materialize_range(int first_object_index, int last_object_index, TRAPS) {
  GrowableArrayCHeap<int, mtClassShared> lazy_object_indices(0);
  size_t materialized_words = 0;

  for (int i = first_object_index; i <= last_object_index; ++i) {
    oopDesc* archive_object = archive_object_for_object_index(i);
    markWord mark = archive_object->mark();

    // The markWord is marked if the object is a String that should be interned;
    // make sure to unmark it before allocating memory for the object.
    bool string_intern = mark.is_marked();
    mark = mark.set_unmarked();

    size_t size = archive_object_size(archive_object);
    materialized_words += size;

    oop heap_object = heap_object_for_object_index(i);
    if (heap_object != nullptr) {
      // Lazy loading has already initialized the object; we must not mutate it
      lazy_object_indices.append(i);
      continue;
    }

    if (!string_intern) {
      // The normal case; lazy loading has not loaded the object yet
      heap_object = allocate_object(archive_object, mark, size, CHECK_0);
      set_heap_object_for_object_index(i, heap_object);
      continue;
    }

    // Eagerly materialize interned strings to ensure that objects earlier than the string
    // in a batch get linked to the intended interned string, and not a copy.
    int value_object_index = archived_string_value_object_index(archive_object);

    bool is_normal_interned_string = value_object_index == i + 1;

    if (value_object_index < first_object_index) {
      // If materialized in a previous batch, the value should already be allocated and initialized.
      assert(heap_object_for_object_index(value_object_index) != nullptr, "should be materialized");
    } else {
      // Materialize the value object.
      oopDesc* archive_value_object = archive_object_for_object_index(value_object_index);
      markWord value_mark = archive_value_object->mark();
      size_t value_size = archive_object_size(archive_value_object);
      oop value_heap_object;

      if (is_normal_interned_string) {
        // The common case: the value is next to the string. This happens when only the interned
        // string points to its value character array.
        assert(value_object_index <= last_object_index, "Must be within this batch: %d <= %d", value_object_index, last_object_index);
        value_heap_object = allocate_object(archive_value_object, value_mark, value_size, CHECK_0);
        set_heap_object_for_object_index(value_object_index, value_heap_object);
        materialized_words += value_size;
      } else {
        // In the uncommon case, multiple strings point to the value of an interned string.
        // The string can then be earlier in the batch.
        assert(value_object_index < i, "surprising index");
        value_heap_object = heap_object_for_object_index(value_object_index);
      }

      copy_object_eager_linking(archive_value_object, value_heap_object, value_size);
    }
    // Allocate and link the string.
    heap_object = allocate_object(archive_object, mark, size, CHECK_0);
    copy_object_eager_linking(archive_object, heap_object, size);

    assert(java_lang_String::value(heap_object) == heap_object_for_object_index(value_object_index), "Linker should have linked this correctly");

    // Replace the string with interned string
    heap_object = StringTable::intern(heap_object, CHECK_0);
    set_heap_object_for_object_index(i, heap_object);

    if (is_normal_interned_string) {
      // Skip over the string value, already materialized
      i++;
    }
  }

  if (lazy_object_indices.is_empty()) {
    // Normal case; no sprinkled lazy objects in the root subgraph
    initialize_range(first_object_index, last_object_index, CHECK_0);
  } else {
    // Lazy loading already initialized some objects in this range; we have to initialize
    // around them to make sure they are not mutated.
    int previous_object_index = first_object_index - 1; // Exclusive start of initialization slice
    for (int i = 0; i < lazy_object_indices.length(); ++i) {
      int lazy_object_index = lazy_object_indices.at(i);
      int slice_start_object_index = previous_object_index;
      int slice_end_object_index = lazy_object_index;

      if (slice_end_object_index - slice_start_object_index > 1) { // Both markers are exclusive
        initialize_range(slice_start_object_index + 1, slice_end_object_index - 1, CHECK_0);
      }
      previous_object_index = lazy_object_index;
    }
    // Process tail range
    if (last_object_index - previous_object_index > 0) {
      initialize_range(previous_object_index + 1, last_object_index, CHECK_0);
    }
  }

  return materialized_words;
}

bool AOTStreamedHeapLoader::IterativeObjectLoader::has_more() {
  return _current_root_index < _num_roots;
}

void AOTStreamedHeapLoader::IterativeObjectLoader::materialize_next_batch(TRAPS) {
  assert(has_more(), "only materialize if there is something to materialize");

  int min_batch_objects = 128;
  int from_root_index = _current_root_index;
  int max_to_root_index = _num_roots - 1;
  int until_root_index = from_root_index;
  int highest_object_index;

  // Expand the batch from one root to N roots, until it crosses min_batch_objects objects in total
  for (;;) {
    highest_object_index = highest_object_index_for_root_index(until_root_index);
    if (highest_object_index - _previous_batch_last_object_index >= min_batch_objects) {
      break;
    }
    if (until_root_index == max_to_root_index) {
      break;
    }
    until_root_index++;
  }

  oop root = nullptr;

  // Materialize objects if necessary, representing the transitive closure of the roots
  if (highest_object_index > _previous_batch_last_object_index) {
    while (_swapping_root_format) {
      // When the roots are being upgraded to use handles, it is not safe to racingly
      // iterate over the objects; we must wait. Setting the current batch last object index
      // to something other than the previous batch last object index indicates to the
      // root format swapping that an iteration is currently ongoing.
      AOTHeapLoading_lock->wait();
    }
    int first_object_index = _previous_batch_last_object_index + 1;
    _current_batch_last_object_index = highest_object_index;
    size_t allocated_words;
    {
      MutexUnlocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);
      allocated_words = materialize_range(first_object_index, highest_object_index, CHECK);
    }
    _allocated_words += allocated_words;
    _previous_batch_last_object_index = _current_batch_last_object_index;
    if (_waiting_for_iterator) {
      // If the tracer is waiting, let it know at the next point of unlocking that the root
      // set it waited for has been processed now.
      AOTHeapLoading_lock->notify_all();
    }
  }

  // Install the roots
  for (int i = from_root_index; i <= until_root_index; ++i) {
    int root_object_index = object_index_for_root_index(i);
    root = heap_object_for_object_index(root_object_index);
    install_root(i, root);
    ++_current_root_index;
  }
}

bool AOTStreamedHeapLoader::materialize_early(TRAPS) {
  Ticks start = Ticks::now();

  // Only help with early materialization from the AOT thread if the heap archive can be allocated
  // without the need for a GC. Otherwise, do lazy loading until GC is enabled later in the bootstrapping.
  size_t bootstrap_max_memory = Universe::heap()->bootstrap_max_memory();
  size_t bootstrap_min_memory = MAX2(_heap_region_used, 2 * M);

  size_t before_gc_materialize_budget_bytes = (bootstrap_max_memory > bootstrap_min_memory) ? bootstrap_max_memory - bootstrap_min_memory : 0;
  size_t before_gc_materialize_budget_words = before_gc_materialize_budget_bytes / HeapWordSize;

  log_info(aot, heap)("Max bootstrapping memory: %zuM, min bootstrapping memory: %zuM, selected budget: %zuM",
                      bootstrap_max_memory / M, bootstrap_min_memory / M, before_gc_materialize_budget_bytes / M);

  while (IterativeObjectLoader::has_more()) {
    if (_allow_gc || _allocated_words > before_gc_materialize_budget_words) {
      log_info(aot, heap)("Early object materialization interrupted at root %d", _current_root_index);
      break;
    }

    IterativeObjectLoader::materialize_next_batch(CHECK_false);
  }

  _early_materialization_time_ns = (Ticks::now() - start).nanoseconds();

  bool finished_before_gc_allowed = !_allow_gc && !IterativeObjectLoader::has_more();

  return finished_before_gc_allowed;
}

void AOTStreamedHeapLoader::materialize_late(TRAPS) {
  Ticks start = Ticks::now();

  // Continue materializing with GC allowed

  while (IterativeObjectLoader::has_more()) {
    IterativeObjectLoader::materialize_next_batch(CHECK);
  }

  _late_materialization_time_ns = (Ticks::now() - start).nanoseconds();
}

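// Releases loader resources once materialization is complete: clears and
// releases the OopStorage handles, frees the object table, and unmaps the
// archive regions.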
void AOTStreamedHeapLoader::cleanup() {
  // First ensure there is no concurrent tracing going on
  while (_waiting_for_iterator) {
    AOTHeapLoading_lock->wait();
  }

  Ticks start = Ticks::now();

  // Remove OopStorage roots
  if (_objects_are_handles) {
    size_t num_handles = _num_archived_objects;
    // Skip the null entry
    oop** handles = ((oop**)_object_index_to_heap_object_table) + 1;
    // Sort the handles so that oop storage can release them faster
    qsort(handles, num_handles, sizeof(oop*), (int (*)(const void*, const void*))oop_handle_cmp);
    size_t num_null_handles = 0;
    for (size_t handles_remaining = num_handles; handles_remaining != 0; --handles_remaining) {
      oop* handle = handles[handles_remaining - 1];
      if (handle == nullptr) {
        num_null_handles = handles_remaining;
        break;
      }
      NativeAccess<>::oop_store(handle, nullptr);
    }
    Universe::vm_global()->release(&handles[num_null_handles], num_handles - num_null_handles);
  }

  FREE_C_HEAP_ARRAY(void*, _object_index_to_heap_object_table);

  // Unmap regions
  FileMapInfo::current_info()->unmap_region(AOTMetaspace::hp);
  FileMapInfo::current_info()->unmap_region(AOTMetaspace::bm);

  _cleanup_materialization_time_ns = (Ticks::now() - start).nanoseconds();

  log_statistics();
}

void AOTStreamedHeapLoader::log_statistics() {
  uint64_t total_duration_us = (Ticks::now() - _materialization_start_ticks).microseconds();
  const bool is_async = _loading_all_objects && !AOTEagerlyLoadObjects;
  const char* const async_or_sync = is_async ? "async" : "sync";
  log_info(aot, heap)("start to finish materialization time: " UINT64_FORMAT "us",
                      total_duration_us);
  log_info(aot, heap)("early object materialization time (%s): " UINT64_FORMAT "us",
                      async_or_sync, _early_materialization_time_ns / 1000);
  log_info(aot, heap)("late object materialization time (%s): " UINT64_FORMAT "us",
                      async_or_sync, _late_materialization_time_ns / 1000);
  log_info(aot, heap)("object materialization cleanup time (%s): " UINT64_FORMAT "us",
                      async_or_sync, _cleanup_materialization_time_ns / 1000);
  log_info(aot, heap)("final object materialization time stall (sync): " UINT64_FORMAT "us",
                      _final_materialization_time_ns / 1000);
  log_info(aot, heap)("bootstrapping lazy materialization time (sync): " UINT64_FORMAT "us",
                      _accumulated_lazy_materialization_time_ns / 1000);

  uint64_t sync_time = _final_materialization_time_ns + _accumulated_lazy_materialization_time_ns;
  uint64_t async_time = _early_materialization_time_ns + _late_materialization_time_ns + _cleanup_materialization_time_ns;

  if (!is_async) {
    sync_time += async_time;
    async_time = 0;
  }

  log_info(aot, heap)("sync materialization time: " UINT64_FORMAT "us",
                      sync_time / 1000);

  log_info(aot, heap)("async materialization time: " UINT64_FORMAT "us",
                      async_time / 1000);

  uint64_t iterative_time = (uint64_t)(is_async ? async_time : sync_time);
  uint64_t materialized_bytes = _allocated_words * HeapWordSize;
  log_info(aot, heap)("%s materialized " UINT64_FORMAT "K (" UINT64_FORMAT "M/s)", async_or_sync,
                      materialized_bytes / 1024, uint64_t(materialized_bytes * UCONST64(1'000'000'000) / M / iterative_time));
}

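// Entry point for the AOT thread: materializes all objects iteratively,
// first within the pre-GC budget and then, once GC is enabled, the remainder.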
void AOTStreamedHeapLoader::materialize_objects() {
  // We cannot handle any exception when materializing roots; an exception exits the VM.
  EXCEPTION_MARK

  // Objects are laid out in DFS order; DFS traverse the roots by linearly walking all objects
  HandleMark hm(THREAD);

  // Early materialization with a budget before GC is allowed
  MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);

  materialize_early(CHECK);
  await_gc_enabled();
  materialize_late(CHECK);
  // Notify materialization is done
  AOTHeapLoading_lock->notify_all();
  cleanup();
}

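// Upgrades a raw-oop table entry to an OopStorage handle so that the entry
// stays valid once GC can move objects.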
void AOTStreamedHeapLoader::switch_object_index_to_handle(int object_index) {
  oop heap_object = cast_to_oop(_object_index_to_heap_object_table[object_index]);
  if (heap_object == nullptr) {
    return;
  }

  oop* handle = Universe::vm_global()->allocate();
  NativeAccess<>::oop_store(handle, heap_object);
  _object_index_to_heap_object_table[object_index] = handle;
}

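// Called when VM bootstrapping reaches the point where GC is allowed. Pauses
// tracing and iteration, upgrades the object table to handles if there is
// still something to materialize, and then lets materialization continue in
// a GC-safe manner.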
void AOTStreamedHeapLoader::enable_gc() {
  if (AOTEagerlyLoadObjects && !IterativeObjectLoader::has_more()) {
    // Everything was loaded eagerly at early startup
    return;
  }

  // We cannot handle any exception when materializing roots; an exception exits the VM.
  EXCEPTION_MARK

  MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);

  // First wait until no tracing is active
  while (_waiting_for_iterator) {
    AOTHeapLoading_lock->wait();
  }

  // Lock further tracing from starting
  _waiting_for_iterator = true;

  // Record iterator progress
  int num_handles = (int)_num_archived_objects;

  // Lock further iteration from starting
  _swapping_root_format = true;

  // Then wait for the iterator to stop
  while (_previous_batch_last_object_index != _current_batch_last_object_index) {
    AOTHeapLoading_lock->wait();
  }

  if (IterativeObjectLoader::has_more()) {
    // If there is more to be materialized, we have to upgrade the object index
    // to object mapping to use handles. If there isn't more to materialize, the
    // handles will no longer be used; they are only used to materialize objects.

    for (int i = 1; i <= num_handles; ++i) {
      // Upgrade the roots to use handles
      switch_object_index_to_handle(i);
    }

    // From now on, accessing the object table must be done through a handle.
    _objects_are_handles = true;
  }

  // Unlock tracing
  _waiting_for_iterator = false;

  // Unlock iteration
  _swapping_root_format = false;

  _allow_gc = true;

  AOTHeapLoading_lock->notify_all();

  if (AOTEagerlyLoadObjects && IterativeObjectLoader::has_more()) {
    materialize_late(CHECK);
    cleanup();
  }
}

void AOTStreamedHeapLoader::materialize_thread_object() {
  AOTThread::materialize_thread_object();
}

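// Called near the end of bootstrapping; blocks until every root has been
// materialized, either by waiting for the AOT thread or by triggering lazy
// materialization of each root here.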
void AOTStreamedHeapLoader::finish_materialize_objects() {
  Ticks start = Ticks::now();

  if (_loading_all_objects) {
    MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);
    // Wait for the AOT thread to finish
    while (IterativeObjectLoader::has_more()) {
      AOTHeapLoading_lock->wait();
    }
  } else {
    assert(!AOTEagerlyLoadObjects, "sanity");
    assert(_current_root_index == 0, "sanity");
    // Without the full module graph we have done only lazy tracing materialization.
    // Ensure all roots are processed here by triggering root loading on every root.
    for (int i = 0; i < _num_roots; ++i) {
      get_root(i);
    }
    cleanup();
  }

  _final_materialization_time_ns = (Ticks::now() - start).nanoseconds();
}

void account_lazy_materialization_time_ns(uint64_t time, const char* description, int index) {
  AtomicAccess::add(&_accumulated_lazy_materialization_time_ns, time);
  log_debug(aot, heap)("Lazy materialization of %s: %d end (" UINT64_FORMAT " us of " UINT64_FORMAT " us)", description, index, time / 1000, _accumulated_lazy_materialization_time_ns / 1000);
}

// Initialize an empty array of AOT heap roots; materialize them lazily
void AOTStreamedHeapLoader::initialize() {
  EXCEPTION_MARK

  _materialization_start_ticks = Ticks::now();

  FileMapInfo::current_info()->map_bitmap_region();

  _heap_region = FileMapInfo::current_info()->region_at(AOTMetaspace::hp);
  _bitmap_region = FileMapInfo::current_info()->region_at(AOTMetaspace::bm);

  assert(_heap_region->used() > 0, "empty heap archive?");

  _is_in_use = true;

  // Archived roots are at this offset in the stream.
  size_t roots_offset = FileMapInfo::current_info()->streamed_heap()->roots_offset();
  size_t forwarding_offset = FileMapInfo::current_info()->streamed_heap()->forwarding_offset();
  size_t root_highest_object_index_table_offset = FileMapInfo::current_info()->streamed_heap()->root_highest_object_index_table_offset();
  _num_archived_objects = FileMapInfo::current_info()->streamed_heap()->num_archived_objects();

  // The first int is the length of the array
  _roots_archive = ((int*)(((address)_heap_region->mapped_base()) + roots_offset)) + 1;
  _num_roots = _roots_archive[-1];
  _heap_region_used = _heap_region->used();

  // We can't retire a TLAB until the filler klass is set; set it to the archived object klass.
  CollectedHeap::set_filler_object_klass(vmClasses::Object_klass());

  objArrayOop roots = oopFactory::new_objectArray(_num_roots, CHECK);
  _roots = OopHandle(Universe::vm_global(), roots);

  _object_index_to_buffer_offset_table = (size_t*)(((address)_heap_region->mapped_base()) + forwarding_offset);
  // We allocate the first entry for "null"
  _object_index_to_heap_object_table = NEW_C_HEAP_ARRAY(void*, _num_archived_objects + 1, mtClassShared);
  Copy::zero_to_bytes(_object_index_to_heap_object_table, (_num_archived_objects + 1) * sizeof(void*));

  _root_highest_object_index_table = (int*)(((address)_heap_region->mapped_base()) + root_highest_object_index_table_offset);

  address start = (address)(_bitmap_region->mapped_base()) + _heap_region->oopmap_offset();
  _oopmap = BitMapView((BitMap::bm_word_t*)start, _heap_region->oopmap_size_in_bits());

  if (FLAG_IS_DEFAULT(AOTEagerlyLoadObjects)) {
    // Concurrency will not help much if there are no extra cores available.
    FLAG_SET_ERGO(AOTEagerlyLoadObjects, os::initial_active_processor_count() <= 1);
  }

  // If the full module graph is not available or the JVMTI class file load hook is on, we
  // will prune the object graph to not include cached objects in subgraphs that are not intended
  // to be loaded.
  _loading_all_objects = CDSConfig::is_using_full_module_graph() && !JvmtiExport::should_post_class_file_load_hook();
  if (!_loading_all_objects) {
    // When not using FMG, fall back to tracing materialization
    FLAG_SET_ERGO(AOTEagerlyLoadObjects, false);
    return;
  }

  if (AOTEagerlyLoadObjects) {
    // Objects are laid out in DFS order; DFS traverse the roots by linearly walking all objects
    HandleMark hm(THREAD);

    // Early materialization with a budget before GC is allowed
    MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);

    bool finished_before_gc_allowed = materialize_early(CHECK);
    if (finished_before_gc_allowed) {
      cleanup();
    }
  } else {
    AOTThread::initialize();
  }
}

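// Lazily materializes a single root (and its transitive closure) on first
// access, accounting the time spent as synchronous lazy materialization.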
oop AOTStreamedHeapLoader::materialize_root(int root_index) {
  Ticks start = Ticks::now();
  // We cannot handle any exception when materializing a root; an exception exits the VM.
  EXCEPTION_MARK
  Stack<AOTHeapTraversalEntry, mtClassShared> dfs_stack;
  HandleMark hm(THREAD);

  oop result;
  {
    MutexLocker ml(AOTHeapLoading_lock, Mutex::_safepoint_check_flag);

    oop root = objArrayOop(_roots.resolve())->obj_at(root_index);

    if (root != nullptr) {
      // The root has already been materialized
      result = root;
    } else {
      // The root has not been materialized, start tracing materialization
      result = TracingObjectLoader::materialize_root(root_index, dfs_stack, CHECK_NULL);
    }
  }

  uint64_t duration = (Ticks::now() - start).nanoseconds();

  account_lazy_materialization_time_ns(duration, "root", root_index);

  return result;
}

oop AOTStreamedHeapLoader::get_root(int index) {
  oop result = objArrayOop(_roots.resolve())->obj_at(index);
  if (result == nullptr) {
    // Materialize root
    result = materialize_root(index);
  }
  if (result == _roots.resolve()) {
    // A self-reference to the roots array acts as a sentinel object for null,
    // indicating that the root has been cleared.
    result = nullptr;
  }
  // Acquire the root transitive object payload
  OrderAccess::acquire();
  return result;
}

void AOTStreamedHeapLoader::clear_root(int index) {
  // Self-reference to the roots array acts as a sentinel object for null,
  // indicating that the root has been cleared.
  objArrayOop(_roots.resolve())->obj_at_put(index, _roots.resolve());
}

void AOTStreamedHeapLoader::await_gc_enabled() {
  while (!_allow_gc) {
    AOTHeapLoading_lock->wait();
  }
}

void AOTStreamedHeapLoader::finish_initialization(FileMapInfo* static_mapinfo) {
  static_mapinfo->stream_heap_region();
}

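// Provides the AOT map logger with a view of the streamed heap archive,
// capturing each archived object by its DFS object index.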
AOTMapLogger::OopDataIterator* AOTStreamedHeapLoader::oop_iterator(FileMapInfo* info, address buffer_start, address buffer_end) {
  class StreamedLoaderOopIterator : public AOTStreamedHeapOopIterator {
  public:
    StreamedLoaderOopIterator(address buffer_start,
                              int num_archived_objects)
      : AOTStreamedHeapOopIterator(buffer_start, num_archived_objects) {}

    AOTMapLogger::OopData capture(int dfs_index) override {
      size_t buffered_offset = buffer_offset_for_object_index(dfs_index);
      address buffered_addr = _buffer_start + buffered_offset;
      oopDesc* raw_oop = (oopDesc*)buffered_addr;
      size_t size = archive_object_size(raw_oop);

      intptr_t target_location = (intptr_t)buffered_offset;
      uint32_t narrow_location = checked_cast<uint32_t>(dfs_index);
      Klass* klass = raw_oop->klass();

      address requested_addr = (address)buffered_offset;

      return { buffered_addr,
               requested_addr,
               target_location,
               narrow_location,
               raw_oop,
               klass,
               size,
               false };
    }

    GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
      GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* result = new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();

      for (int i = 0; i < _num_roots; ++i) {
        int object_index = object_index_for_root_index(i);
        result->append(capture(object_index));
      }

      return result;
    }
  };

  assert(_is_in_use, "printing before initializing?");

  return new StreamedLoaderOopIterator(buffer_start, (int)info->streamed_heap()->num_archived_objects());
}

#endif // INCLUDE_CDS_JAVA_HEAP