/*
 * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/aotClassInitializer.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsAccess.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsEnumKlass.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  bool valid() {
    return klass_name != nullptr;
  }
};

class HeapShared::ArchivingObjectMark : public StackObj {
public:
  ArchivingObjectMark(oop obj) {
    _trace->push(obj);
  }
  ~ArchivingObjectMark() {
    _trace->pop();
  }
};

class HeapShared::ContextMark : public StackObj {
  ResourceMark rm;
public:
  ContextMark(const char* c) : rm{} {
    _context->push(c);
  }
  ~ContextMark() {
    _context->pop();
  }
};

bool HeapShared::_disable_writing = false;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif


//
// If you add new entries to the following tables, you should know what you're doing!
//

static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",              "archivedCache"},
  {"java/lang/Long$LongCache",                    "archivedCache"},
  {"java/lang/Byte$ByteCache",                    "archivedCache"},
  {"java/lang/Short$ShortCache",                  "archivedCache"},
  {"java/lang/Character$CharacterCache",          "archivedCache"},
  {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
  {"java/util/ImmutableCollections",              "archivedObjects"},
  {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
  {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
  {"java/lang/reflect/Proxy$ProxyBuilder",        "archivedData"},    // FIXME -- requires AOTClassLinking

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};

// full module graph
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS,                     ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData",               "archivedData"},
  {nullptr, nullptr},
};

KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_trace = nullptr;
GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;

static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
      return true;
    }
  }
  return false;
}

bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
}

unsigned HeapShared::oop_hash(oop const& p) {
  // Do not call p->identity_hash() as that will update the
  // object header.
  return primitive_hash(cast_from_oop<intptr_t>(p));
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != nullptr) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != nullptr) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}

void HeapShared::reset_archived_object_states(TRAPS) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), CHECK);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), CHECK);

  // Clean up jdk.internal.loader.ClassLoaders::bootLoader(), which is not
  // directly used for class loading, but rather is used by the core library
  // to keep track of resources, etc, loaded by the null class loader.
  //
  // Note, this object is non-null, and is not the same as
  // ClassLoaderData::the_null_class_loader_data()->class_loader(),
  // which is null.
  log_debug(cds)("Resetting boot loader");
  JavaValue result(T_OBJECT);
  JavaCalls::call_static(&result,
                         vmClasses::jdk_internal_loader_ClassLoaders_klass(),
                         vmSymbols::bootLoader_name(),
                         vmSymbols::void_BuiltinClassLoader_signature(),
                         CHECK);
  Handle boot_loader(THREAD, result.get_oop());
  reset_states(boot_loader(), CHECK);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;

bool HeapShared::has_been_archived(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  return archived_object_cache()->get(obj) != nullptr;
}

int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");

  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == nullptr) {
    _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
  }

  OopHandle oh(Universe::vm_global(), obj);
  return _pending_roots->append(oh);
}
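
// Illustrative usage sketch (not part of the original source): dump-time code
// typically archives an object and then records it as a root, keeping the
// returned index so the production run can retrieve the object again:
//
//   if (HeapShared::archive_object(obj)) {          // hypothetical caller
//     int root_index = HeapShared::append_root(obj);
//     // root_index is stored in archived metadata and later passed to
//     // HeapShared::get_root(root_index) during the production run.
//   }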

objArrayOop HeapShared::root_segment(int segment_idx) {
  if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!HeapShared::can_write()) {
      return nullptr;
    }
  } else {
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}

inline unsigned int oop_handle_hash(const OopHandle& oh) {
  oop o = oh.resolve();
  if (o == nullptr) {
    return 0;
  } else {
    return o->identity_hash();
  }
}

inline bool oop_handle_equals(const OopHandle& a, const OopHandle& b) {
  return a.resolve() == b.resolve();
}

class OrigToScratchObjectTable: public ResourceHashtable<OopHandle, OopHandle,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;

void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
  if (_orig_to_scratch_object_table == nullptr) {
    _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
  }

  OopHandle orig_h(Universe::vm_global(), orig_obj);
  OopHandle scratch_h(Universe::vm_global(), scratch_obj);
  _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
}

oop HeapShared::orig_to_scratch_object(oop orig_obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
  if (_orig_to_scratch_object_table != nullptr) {
    OopHandle orig(&orig_obj);
    OopHandle* v = _orig_to_scratch_object_table->get(orig);
    if (v != nullptr) {
      return v->resolve();
    }
  }
  return nullptr;
}
// Permanent oops are used to support AOT-compiled methods, which may have in-line references
// to Strings and MH oops.
//
// At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
// and are accessed via CDSAccess::get_archived_object(int).
struct PermanentOopInfo {
  int _index;       // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
  int _heap_offset; // Offset of the object from the bottom of the archived heap.
  PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
};
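
// A minimal sketch of the intended flow, as described in the comments above
// (illustrative only; the exact call sites live in ArchiveHeapWriter and the
// AOT compiler):
//
//   // Dump time: every archived object gets its heap offset recorded.
//   HeapShared::add_to_permanent_oop_table(obj, heap_offset);
//   // The AOT compiler pins an object it embeds, lazily assigning an index:
//   int idx = HeapShared::get_archived_object_permanent_index(obj);
//   // Production run: the same index resolves back to the (relocated) oop:
//   oop o = HeapShared::get_archived_object(idx);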

class PermanentOopTable: public ResourceHashtable<OopHandle, PermanentOopInfo,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

static int _dumptime_permanent_oop_count = 0;
static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;

// ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
// so we can remember their offset (from the bottom of the archived heap).
void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
  assert_at_safepoint();
  if (_dumptime_permanent_oop_table == nullptr) {
    _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
  }

  PermanentOopInfo info(-1, offset);
  OopHandle oh(Universe::vm_global(), obj);
  _dumptime_permanent_oop_table->put_when_absent(oh, info);
}

// A permanent index is assigned to an archived object ONLY when
// the AOT compiler calls this function.
int HeapShared::get_archived_object_permanent_index(oop obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);

  if (!CDSConfig::is_dumping_heap()) {
    return -1; // Called by the Leyden old workflow
  }
  if (_dumptime_permanent_oop_table == nullptr) {
    return -1;
  }

  if (_orig_to_scratch_object_table != nullptr) {
    OopHandle orig(&obj);
    OopHandle* v = _orig_to_scratch_object_table->get(orig);
    if (v != nullptr) {
      obj = v->resolve();
    }
  }

  OopHandle tmp(&obj);
  PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
  if (info == nullptr) {
    return -1;
  } else {
    if (info->_index < 0) {
      info->_index = _dumptime_permanent_oop_count++;
    }
    return info->_index;
  }
}

oop HeapShared::get_archived_object(int permanent_index) {
  assert(permanent_index >= 0, "sanity");
  assert(ArchiveHeapLoader::is_in_use(), "sanity");
  assert(_runtime_permanent_oops != nullptr, "sanity");

  return _runtime_permanent_oops->at(permanent_index).resolve();
}

// Remember all archived heap objects that have a permanent index.
//   table[i] = offset of oop whose permanent index is i.
void CachedCodeDirectoryInternal::dumptime_init_internal() {
  const int count = _dumptime_permanent_oop_count;
  int* table = (int*)CDSAccess::allocate_from_code_cache(count * sizeof(int));
  for (int i = 0; i < count; i++) {
    table[i] = -1;
  }
  _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
    int index = info._index;
    if (index >= 0) {
      assert(index < count, "sanity");
      table[index] = info._heap_offset;
    }
    return true; // continue
  });

  for (int i = 0; i < count; i++) {
    assert(table[i] >= 0, "must be");
  }

  log_info(cds)("Dumped %d permanent oops", count);

  _permanent_oop_count = count;
  CDSAccess::set_pointer(&_permanent_oop_offsets, table);
}

// This is called during the bootstrap of the production run, before any GC can happen.
// Record each permanent oop in an OopHandle for GC safety.
void CachedCodeDirectoryInternal::runtime_init_internal() {
  int count = _permanent_oop_count;
  int* table = _permanent_oop_offsets;
  _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
  for (int i = 0; i < count; i++) {
    oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
    OopHandle oh(Universe::vm_global(), obj);
    _runtime_permanent_oops->append(oh);
  }
}

void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
  assert(_root_segment_max_size_elems > 0, "sanity");

  // Try to avoid divisions for the common case.
  if (idx < _root_segment_max_size_elems) {
    seg_idx = 0;
    int_idx = idx;
  } else {
    seg_idx = idx / _root_segment_max_size_elems;
    int_idx = idx % _root_segment_max_size_elems;
  }

  assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
         "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}
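
// Worked example (values chosen for illustration): with
// _root_segment_max_size_elems == 1000, idx == 2500 yields
// seg_idx == 2500 / 1000 == 2 and int_idx == 2500 % 1000 == 500;
// indeed 2 * 1000 + 500 == 2500, satisfying the assert above.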

// Returns the root object at the given index in the archived heap roots
// (stored across one or more root segments).
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
  assert(!_root_segments->is_empty(), "must have loaded shared heap");
  int seg_idx, int_idx;
  get_segment_indexes(index, seg_idx, int_idx);
  oop result = root_segment(seg_idx)->obj_at(int_idx);
  if (clear) {
    clear_root(index);
  }
  return result;
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(CDSConfig::is_using_archive(), "must be");
  if (ArchiveHeapLoader::is_in_use()) {
    int seg_idx, int_idx;
    get_segment_indexes(index, seg_idx, int_idx);
    if (log_is_enabled(Debug, cds, heap)) {
      oop old = root_segment(seg_idx)->obj_at(int_idx);
      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    root_segment(seg_idx)->obj_at_put(int_idx, nullptr);
  }
}
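
// Illustrative pattern (hypothetical caller): a root that is needed only once
// can be read and released in a single step, allowing the GC to reclaim the
// object afterwards:
//
//   oop o = HeapShared::get_root(index, /*clear=*/true);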

bool HeapShared::archive_object(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");
  if (has_been_archived(obj)) {
    return true;
  }

  if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), obj->size());
    debug_trace();
    return false;
  } else {
    count_allocation(obj->size());
    ArchiveHeapWriter::add_source_obj(obj);
    CachedOopInfo info = make_cached_oop_info(obj);
    archived_object_cache()->put_when_absent(obj, info);
    archived_object_cache()->maybe_grow();
    mark_native_pointers(obj);

    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      LogTarget(Debug, cds, heap) log;
      LogStream out(log);
      out.print("Archived heap object " PTR_FORMAT " : %s ",
                p2i(obj), obj->klass()->external_name());
      if (java_lang_Class::is_instance(obj)) {
        Klass* k = java_lang_Class::as_Klass(obj);
        if (k != nullptr) {
          out.print("%s", k->external_name());
        } else {
          out.print("primitive");
        }
      }
      out.cr();
    }

    if (java_lang_Module::is_instance(obj) && Modules::check_archived_module_oop(obj)) {
      Modules::update_oops_in_archived_module(obj, append_root(obj));
    }

    return true;
  }
}

class MetaspaceObjToOopHandleTable: public ResourceHashtable<MetaspaceObj*, OopHandle,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared> {
public:
  oop get_oop(MetaspaceObj* ptr) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle* handle = get(ptr);
    if (handle != nullptr) {
      return handle->resolve();
    } else {
      return nullptr;
    }
  }
  void set_oop(MetaspaceObj* ptr, oop o) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle handle(Universe::vm_global(), o);
    bool is_new = put(ptr, handle);
    assert(is_new, "cannot set twice");
  }
  void remove_oop(MetaspaceObj* ptr) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle* handle = get(ptr);
    if (handle != nullptr) {
      handle->release(Universe::vm_global());
      remove(ptr);
    }
  }
};
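
// Illustrative sketch: the table above maps metaspace objects (e.g. a
// ConstantPool*) to heap objects through global OopHandles, so the mappings
// stay valid across GCs. add_scratch_resolved_references() below is one user:
//
//   _scratch_references_table->set_oop(src_cp, scratch_array);  // dump time
//   oop arr = _scratch_references_table->get_oop(src_cp);       // later lookup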

void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  if (_scratch_references_table == nullptr) {
    _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
  }
  _scratch_references_table->set_oop(src, dest);
}

objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_references_table->get_oop(src);
}

void HeapShared::init_scratch_objects(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
      track_scratch_object(Universe::java_mirror(bt), m);
    }
  }
  _scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
  if (_scratch_references_table == nullptr) {
    _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
  }
}

// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that java_mirror itself is returned if it's already a
// scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {
  assert(java_lang_Class::is_instance(java_mirror), "must be");

  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
        return java_mirror;
      }
    }
  }

  if (java_lang_Class::is_primitive(java_mirror)) {
    return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
  } else {
    return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
  }
}
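
// Illustrative examples of the dispatch above (assumed, for clarity):
//   scratch_java_mirror(Universe::java_mirror(T_INT))
//       -> the T_INT entry of _scratch_basic_type_mirrors
//   scratch_java_mirror(mirror of java.lang.String)
//       -> _scratch_java_mirror_table entry for vmClasses::String_klass()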

oop HeapShared::scratch_java_mirror(BasicType t) {
  assert((uint)t < T_VOID+1, "range check");
  assert(!is_reference_type(t), "sanity");
  return _scratch_basic_type_mirrors[t].resolve();
}

oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_java_mirror_table->get_oop(k);
}

void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  track_scratch_object(k->java_mirror(), mirror);
  _scratch_java_mirror_table->set_oop(k, mirror);
}

void HeapShared::remove_scratch_objects(Klass* k) {
  // Klass is being deallocated. Java mirror can still be alive, and it should not
  // point to dead klass. We need to break the link from mirror to the Klass.
  // See how InstanceKlass::deallocate_contents does it for normal mirrors.
  oop mirror = _scratch_java_mirror_table->get_oop(k);
  if (mirror != nullptr) {
    java_lang_Class::set_klass(mirror, nullptr);
  }
  _scratch_java_mirror_table->remove_oop(k);
  if (k->is_instance_klass()) {
    _scratch_references_table->remove(InstanceKlass::cast(k)->constants());
  }
  if (mirror != nullptr) {
    OopHandle tmp(&mirror);
    OopHandle* v = _orig_to_scratch_object_table->get(tmp);
    if (v != nullptr) {
      oop scratch_mirror = v->resolve();
      java_lang_Class::set_klass(scratch_mirror, nullptr);
      _orig_to_scratch_object_table->remove(tmp);
    }
  }
}

//TODO: we eventually want a more direct test for these kinds of things.
//For example the JVM could record some bit of context from the creation
//of the klass, such as who called the hidden class factory.  Using
//string compares on names is fragile and will break as soon as somebody
//changes the names in the JDK code.  See discussion in JDK-8342481 for
//related ideas about marking AOT-related classes.
bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
  return ik->is_hidden() &&
    (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
     ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
     ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
     ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
}

bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
  return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
}

bool HeapShared::is_string_concat_klass(InstanceKlass* ik) {
  return ik->is_hidden() && ik->name()->starts_with("java/lang/String$$StringConcat");
}

bool HeapShared::is_archivable_hidden_klass(InstanceKlass* ik) {
  return CDSConfig::is_dumping_invokedynamic() &&
    (is_lambda_form_klass(ik) || is_lambda_proxy_klass(ik) || is_string_concat_klass(ik));
}
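
// Examples of hidden class names matched by the predicates above
// (illustrative; the "+0x..." suffix stands for the address appended to
// hidden class names):
//   java/lang/invoke/LambdaForm$MH+0x...   -> is_lambda_form_klass
//   com/example/Foo$$Lambda+0x...          -> is_lambda_proxy_klass
//   java/lang/String$$StringConcat+0x...   -> is_string_concat_klass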

void HeapShared::copy_aot_initialized_mirror(Klass* orig_k, oop orig_mirror, oop m) {
  assert(orig_k->is_instance_klass(), "sanity");
  InstanceKlass* ik = InstanceKlass::cast(orig_k);
  InstanceKlass* buffered_ik = ArchiveBuilder::current()->get_buffered_addr(ik);

  assert(ik->is_initialized(), "must be");

  int nfields = 0;
  for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) {
      fieldDescriptor& fd = fs.field_descriptor();
      int offset = fd.offset();
      switch (fd.field_type()) {
      case T_OBJECT:
      case T_ARRAY:
        m->obj_field_put(offset, orig_mirror->obj_field(offset));
        break;
      case T_BOOLEAN:
        m->bool_field_put(offset, orig_mirror->bool_field(offset));
        break;
      case T_BYTE:
        m->byte_field_put(offset, orig_mirror->byte_field(offset));
        break;
      case T_SHORT:
        m->short_field_put(offset, orig_mirror->short_field(offset));
        break;
      case T_CHAR:
        m->char_field_put(offset, orig_mirror->char_field(offset));
        break;
      case T_INT:
        m->int_field_put(offset, orig_mirror->int_field(offset));
        break;
      case T_LONG:
        m->long_field_put(offset, orig_mirror->long_field(offset));
        break;
      case T_FLOAT:
        m->float_field_put(offset, orig_mirror->float_field(offset));
        break;
      case T_DOUBLE:
        m->double_field_put(offset, orig_mirror->double_field(offset));
        break;
      default:
        ShouldNotReachHere();
      }
      nfields ++;
    }
  }

  java_lang_Class::set_class_data(m, java_lang_Class::class_data(orig_mirror));
  // Class::reflectData uses a SoftReference, which cannot be archived. Set it
  // to null; it will be recreated at runtime.
  java_lang_Class::set_reflection_data(m, nullptr);

  if (log_is_enabled(Debug, cds, init)) {
    ResourceMark rm;
    log_debug(cds, init)("copied %3d field(s) in aot-initialized mirror %s%s", nfields, ik->external_name(),
                         ik->is_hidden() ? " (hidden)" : "");
  }
}

static void copy_java_mirror_hashcode(oop orig_mirror, oop scratch_m) {
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!orig_mirror->fast_no_hash_check()) {
    intptr_t src_hash = orig_mirror->identity_hash();
    if (UseCompactObjectHeaders) {
      narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
      scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(scratch_m->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
}

static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
  InstanceKlass* buffered_ik = ArchiveBuilder::current()->get_buffered_addr(src_ik);
  if (buffered_ik->is_shared_boot_class() ||
      buffered_ik->is_shared_platform_class() ||
      buffered_ik->is_shared_app_class()) {
    objArrayOop rr = src_ik->constants()->resolved_references_or_null();
    if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
      return HeapShared::scratch_resolved_references(src_ik->constants());
    }
  }
  return nullptr;
}

void HeapShared::archive_java_mirrors() {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop orig_mirror = Universe::java_mirror(bt);
      oop m = _scratch_basic_type_mirrors[i].resolve();
      assert(m != nullptr, "sanity");
      copy_java_mirror_hashcode(orig_mirror, m);
      bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
      assert(success, "sanity");

      log_trace(cds, heap, mirror)(
        "Archived %s mirror object from " PTR_FORMAT,
        type2name(bt), p2i(m));

      Universe::set_archived_basic_type_mirror_index(bt, append_root(m));
    }
  }

  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
  assert(klasses != nullptr, "sanity");

  for (int i = 0; i < klasses->length(); i++) {
    Klass* orig_k = klasses->at(i);
    oop orig_mirror = orig_k->java_mirror();
    oop m = scratch_java_mirror(orig_k);
    if (m != nullptr) {
      copy_java_mirror_hashcode(orig_mirror, m);
    }
  }

  for (int i = 0; i < klasses->length(); i++) {
    Klass* orig_k = klasses->at(i);
    oop orig_mirror = orig_k->java_mirror();
    oop m = scratch_java_mirror(orig_k);
    if (m != nullptr) {
      Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
      bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
      guarantee(success, "scratch mirrors must point to only archivable objects");
      buffered_k->set_archived_java_mirror(append_root(m));
      ResourceMark rm;
      log_trace(cds, heap, mirror)(
        "Archived %s mirror object from " PTR_FORMAT,
        buffered_k->external_name(), p2i(m));

      // archive the resolved_references array
      if (buffered_k->is_instance_klass()) {
        InstanceKlass* ik = InstanceKlass::cast(buffered_k);
        objArrayOop rr = get_archived_resolved_references(InstanceKlass::cast(orig_k));
        if (rr != nullptr) {
          bool success = HeapShared::archive_reachable_objects_from(1, _dump_time_special_subgraph, rr);
          assert(success, "must be");
          int root_index = append_root(rr);
          ik->constants()->cache()->set_archived_references(root_index);
        }
      }
    }
  }
}

void HeapShared::archive_strings() {
  oop shared_strings_array = StringTable::init_shared_table(_dumped_interned_strings);
  bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
  // We must succeed because:
  // - _dumped_interned_strings do not contain any large strings.
  // - StringTable::init_shared_table() doesn't create any large arrays.
  assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
  StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
}

int HeapShared::archive_exception_instance(oop exception) {
  bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
  assert(success, "sanity");
  return append_root(exception);
}

void HeapShared::mark_native_pointers(oop orig_obj) {
  if (java_lang_Class::is_instance(orig_obj)) {
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
  } else if (java_lang_invoke_ResolvedMethodName::is_instance(orig_obj)) {
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_invoke_ResolvedMethodName::vmtarget_offset());
  }
}

void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  has_oop_pointers = info->has_oop_pointers();
  has_native_pointers = info->has_native_pointers();
}

void HeapShared::set_has_native_pointers(oop src_obj) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}

void HeapShared::start_finding_required_hidden_classes() {
  if (!CDSConfig::is_dumping_invokedynamic()) {
    return;
  }
  NoSafepointVerifier nsv;

  init_seen_objects_table();

  // We first scan the objects that are known to be archived (from the archive_subgraph
  // tables)
  find_required_hidden_classes_helper(archive_subgraph_entry_fields);
  if (CDSConfig::is_dumping_full_module_graph()) {
    find_required_hidden_classes_helper(fmg_archive_subgraph_entry_fields);
  }

  // Later, SystemDictionaryShared::find_all_archivable_classes_impl() will start
  // scanning the constant pools of all classes that it decides to archive.
}

void HeapShared::end_finding_required_hidden_classes() {
  if (!CDSConfig::is_dumping_invokedynamic()) {
    return;
  }
  NoSafepointVerifier nsv;

  delete_seen_objects_table();
}

void HeapShared::find_required_hidden_classes_helper(ArchivableStaticFieldInfo fields[]) {
  if (!CDSConfig::is_dumping_heap()) {
    return;
  }
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    InstanceKlass* k = f->klass;
    oop m = k->java_mirror();
    oop o = m->obj_field(f->offset);
    if (o != nullptr) {
      find_required_hidden_classes_in_object(o);
    }
  }
}

class HeapShared::FindRequiredHiddenClassesOopClosure: public BasicOopIterateClosure {
  GrowableArray<oop> _stack;
  template <class T> void do_oop_work(T *p) {
    // Recurse on a GrowableArray to avoid overflowing the C stack.
    oop o = RawAccess<>::oop_load(p);
    if (o != nullptr) {
      _stack.append(o);
    }
  }

 public:

  void do_oop(narrowOop *p) { FindRequiredHiddenClassesOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { FindRequiredHiddenClassesOopClosure::do_oop_work(p); }

  FindRequiredHiddenClassesOopClosure(oop o) {
    _stack.append(o);
  }
  oop pop() {
    if (_stack.length() == 0) {
      return nullptr;
    } else {
      return _stack.pop();
    }
  }
};
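
// Illustrative traversal driver (this is the pattern used by
// find_required_hidden_classes_in_object() below): seed the closure with a
// root, then pop-and-iterate until the explicit stack drains, giving an
// iterative DFS with bounded C-stack usage:
//
//   FindRequiredHiddenClassesOopClosure c(root);
//   for (oop o; (o = c.pop()) != nullptr; ) {
//     o->oop_iterate(&c);  // pushes o's references back onto the stack
//   }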

static void mark_required_if_hidden_class(Klass* k) {
  if (k != nullptr && k->is_instance_klass()) {
    InstanceKlass* ik = InstanceKlass::cast(k);
    if (ik->is_hidden()) {
      SystemDictionaryShared::mark_required_hidden_class(ik);
    }
  }
}


void HeapShared::find_required_hidden_classes_in_object(oop root) {
  ResourceMark rm;
  FindRequiredHiddenClassesOopClosure c(root);
  oop o;
  while ((o = c.pop()) != nullptr) {
    if (!has_been_seen_during_subgraph_recording(o)) {
      set_has_been_seen_during_subgraph_recording(o);

      // Mark the klass of this object
      mark_required_if_hidden_class(o->klass());

      // For special objects, mark the klass that they contain information about:
      // - a Class that refers to a hidden class
      // - a ResolvedMethodName that refers to a method declared in a hidden class
      if (java_lang_Class::is_instance(o)) {
        mark_required_if_hidden_class(java_lang_Class::as_Klass(o));
      } else if (java_lang_invoke_ResolvedMethodName::is_instance(o)) {
        Method* m = java_lang_invoke_ResolvedMethodName::vmtarget(o);
        if (m != nullptr) {
          mark_required_if_hidden_class(m->method_holder());
        }
      }

      o->oop_iterate(&c);
    }
  }
}

void HeapShared::archive_objects(ArchiveHeapInfo *heap_info) {
  {
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
    _trace = new GrowableArrayCHeap<oop, mtClassShared>(250);
    _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (UseCompressedOops || UseG1GC) {
      log_info(cds)("Heap range = [" PTR_FORMAT " - "  PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }
    copy_objects();

    if (!SkipArchiveHeapVerification) {
      CDSHeapVerifier::verify();
    }
    check_special_subgraph_classes();
  }

  GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
  for (int i = 0; i < _pending_roots->length(); i++) {
    roots->append(_pending_roots->at(i).resolve());
  }
  ArchiveHeapWriter::write(roots, heap_info);
}

void HeapShared::copy_interned_strings() {
  init_seen_objects_table();

  auto copier = [&] (oop s, bool value_ignored) {
    assert(s != nullptr, "sanity");
    assert(!ArchiveHeapWriter::is_string_too_large_to_archive(s), "large strings must have been filtered");
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, s);
    assert(success, "must be");
    // Prevent string deduplication from changing the value field to
    // something not in the archive.
    java_lang_String::set_deduplication_forbidden(s);
  };
  _dumped_interned_strings->iterate_all(copier);

  delete_seen_objects_table();
}

void HeapShared::copy_special_subgraph() {
  copy_interned_strings();

  init_seen_objects_table();
  {
    archive_java_mirrors();
    archive_strings();
    Universe::archive_exception_instances();
  }
  delete_seen_objects_table();
}

void HeapShared::prepare_resolved_references() {
  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
  for (int i = 0; i < klasses->length(); i++) {
    Klass* src_k = klasses->at(i);
    if (src_k->is_instance_klass()) {
      InstanceKlass* buffered_ik = ArchiveBuilder::current()->get_buffered_addr(InstanceKlass::cast(src_k));
      buffered_ik->constants()->prepare_resolved_references_for_archiving();
    }
  }
}

void HeapShared::copy_objects() {
  assert(HeapShared::can_write(), "must be");

  prepare_resolved_references();
  find_all_aot_initialized_classes();
  copy_special_subgraph();

  archive_object_subgraphs(archive_subgraph_entry_fields,
                           false /* is_full_module_graph */);

  if (CDSConfig::is_dumping_full_module_graph()) {
    archive_object_subgraphs(fmg_archive_subgraph_entry_fields,
                             true /* is_full_module_graph */);
    Modules::verify_archived_modules();
  }
}

// Closure used by HeapShared::scan_for_aot_initialized_classes() to look for all objects
// that are reachable from a given root.
class HeapShared::AOTInitializedClassScanner : public BasicOopIterateClosure {
  bool _made_progress;

  template <class T> void check(T *p) {
    oop obj = HeapAccess<>::oop_load(p);
    if (!java_lang_Class::is_instance(obj)) {
      // Don't scan the mirrors, as we may see an orig_mirror while scanning
      // the object graph, .... TODO more info
      _made_progress |= HeapShared::scan_for_aot_initialized_classes(obj);
    }
  }

public:
  AOTInitializedClassScanner() : _made_progress(false) {}
  void do_oop(narrowOop *p) { check(p); }
  void do_oop(      oop *p) { check(p); }
  bool made_progress() { return _made_progress; }
};

// If <buffered_ik> has been initialized during the assembly phase, mark its
// has_aot_initialized_mirror bit. And then do the same for all supertypes of
// <buffered_ik>.
//
// Note: a super interface <intf> of <buffered_ik> may not have been initialized, if
// <intf> has not declared any default methods.
//
// Note: this function does not call InstanceKlass::initialize() -- we are inside
// a safepoint.
//
// Returns true if one or more classes have been newly marked.
static bool mark_for_aot_initialization(InstanceKlass* buffered_ik) {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity");
  assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_ik), "sanity");

  if (buffered_ik->has_aot_initialized_mirror()) { // already marked
    return false;
  }

  bool made_progress = false;
  if (buffered_ik->is_initialized()) {
    if (log_is_enabled(Info, cds, init)) {
      ResourceMark rm;
      log_info(cds, init)("Mark class for aot-init: %s", buffered_ik->external_name());
    }

    InstanceKlass* src_ik = ArchiveBuilder::current()->get_source_addr(buffered_ik);

    // If we get here with a "wild" user class, which may have
    // uncontrolled <clinit> code, exit with an error.  Obviously
    // filtering logic upstream needs to detect APP classes and not mark
    // them for aot-init in the first place, but this will be the final
    // firewall.

#ifndef PRODUCT
    // ArchiveHeapTestClass is used for a very small number of internal regression
    // tests (non-product builds only). It may initialize some unexpected classes.
    if (ArchiveHeapTestClass == nullptr)
#endif
    {
      if (!src_ik->in_javabase_module()) {
        // Class/interface types in the boot loader may have been initialized as side effects
        // of JVM bootstrap code, so they are fine. But we need to check all other classes.
        if (buffered_ik->is_interface()) {
          // This probably means a bug in AOTConstantPoolResolver::is_indy_resolution_deterministic()
          guarantee(!buffered_ik->interface_needs_clinit_execution_as_super(),
                    "should not have initialized an interface whose <clinit> might have unpredictable side effects");
        } else {
          // "normal" classes
          guarantee(HeapShared::is_archivable_hidden_klass(buffered_ik),
                    "should not have initialized any non-interface, non-hidden classes outside of java.base");
        }
      }
    }

#if 0
    if (buffered_ik->name()->equals("jdk/internal/loader/NativeLibraries")) {  // FIXME -- leyden+JEP483 merge
      return false;
    }
#endif
    buffered_ik->set_has_aot_initialized_mirror();
    if (AOTClassInitializer::is_runtime_setup_required(src_ik)) {
      buffered_ik->set_is_runtime_setup_required();
    }
    made_progress = true;

    InstanceKlass* super = buffered_ik->java_super();
    if (super != nullptr) {
      mark_for_aot_initialization(super);
    }

    Array<InstanceKlass*>* interfaces = buffered_ik->transitive_interfaces();
    for (int i = 0; i < interfaces->length(); i++) {
      InstanceKlass* intf = interfaces->at(i);
      mark_for_aot_initialization(intf);
      if (!intf->is_initialized()) {
        assert(!intf->interface_needs_clinit_execution_as_super(/*also_check_supers*/false), "sanity");
        assert(!intf->has_aot_initialized_mirror(), "must not be marked");
      }
    }
  }

  return made_progress;
}
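
// Illustrative propagation example (hypothetical classes): if class B extends A
// and implements interface I, and B was initialized during the assembly phase:
//
//   mark_for_aot_initialization(B)   // marks B, then recurses:
//     -> mark_for_aot_initialization(A)   // the super chain
//     -> mark_for_aot_initialization(I)   // each transitive interface; I is
//        // marked only if it was itself initialized (e.g. because it declares
//        // default methods whose initialization was triggered).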

void HeapShared::find_all_aot_initialized_classes() {
  if (!CDSConfig::is_dumping_aot_linked_classes()) {
    return;
  }

  init_seen_objects_table();
  find_all_aot_initialized_classes_helper();
  delete_seen_objects_table();
}

// Recursively find all classes that should be aot-initialized:
// - the class has at least one instance that is reachable from the special subgraph; or
// - the class is hard-coded in AOTClassInitializer::can_archive_initialized_mirror()
void HeapShared::find_all_aot_initialized_classes_helper() {
  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
  assert(klasses != nullptr, "sanity");

  // First scan all resolved constant pool references.
  for (int i = 0; i < klasses->length(); i++) {
    Klass* src_k = klasses->at(i);
    if (src_k->is_instance_klass()) {
      InstanceKlass* src_ik = InstanceKlass::cast(src_k);
      InstanceKlass* buffered_ik = ArchiveBuilder::current()->get_buffered_addr(src_ik);
      objArrayOop rr = get_archived_resolved_references(src_ik);
      if (rr != nullptr) {
        objArrayOop scratch_rr = scratch_resolved_references(src_ik->constants());
        for (int i = 0; i < scratch_rr->length(); i++) {
          scan_for_aot_initialized_classes(scratch_rr->obj_at(i));
        }
      }

      // If a class is hard-coded to be aot-initialized, mark it as such.
      if (AOTClassInitializer::can_archive_initialized_mirror(src_ik)) {
        mark_for_aot_initialization(buffered_ik);
      }
    }
  }

  // These objects also belong to the special subgraph
  scan_for_aot_initialized_classes(Universe::null_ptr_exception_instance());
  scan_for_aot_initialized_classes(Universe::arithmetic_exception_instance());
  scan_for_aot_initialized_classes(Universe::internal_error_instance());
  scan_for_aot_initialized_classes(Universe::array_index_out_of_bounds_exception_instance());
  scan_for_aot_initialized_classes(Universe::array_store_exception_instance());
  scan_for_aot_initialized_classes(Universe::class_cast_exception_instance());

  bool made_progress;
  do {
    // In each pass, we copy the scratch mirrors of the classes that were marked
    // as aot-init in the previous pass. We then scan these mirrors, which may
    // mark more classes. Keep iterating until no more progress can be made.
    made_progress = false;
    for (int i = 0; i < klasses->length(); i++) {
      Klass* orig_k = klasses->at(i);
      if (orig_k->is_instance_klass()) {
        InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
        if (ArchiveBuilder::current()->get_buffered_addr(orig_ik)->has_aot_initialized_mirror()) {
          oop orig_mirror = orig_ik->java_mirror();
          oop scratch_mirror = scratch_java_mirror(orig_k);
          if (!has_been_seen_during_subgraph_recording(scratch_mirror)) {
            // Scan scratch_mirror instead of orig_mirror (which has fields like ClassLoader that
            // are not archived).
            copy_aot_initialized_mirror(orig_k, orig_mirror, scratch_mirror);
            made_progress |= scan_for_aot_initialized_classes(scratch_mirror);
          }
        }
      }
    }
  } while (made_progress);
}

bool HeapShared::scan_for_aot_initialized_classes(oop obj) {
  if (obj == nullptr || has_been_seen_during_subgraph_recording(obj)) {
    return false;
  }
  set_has_been_seen_during_subgraph_recording(obj);

  bool made_progress = false;
  Klass* k = obj->klass();
  if (k->is_instance_klass()) {
    InstanceKlass* orig_ik = InstanceKlass::cast(k);
    InstanceKlass* buffered_ik = ArchiveBuilder::current()->get_buffered_addr(orig_ik);
    made_progress = mark_for_aot_initialization(buffered_ik);
  }

  AOTInitializedClassScanner scanner;
  obj->oop_iterate(&scanner);
  made_progress |= scanner.made_progress();
  return made_progress;
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = nullptr;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the "buffered"
// address of the class.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  bool created;
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
  KlassSubGraphInfo* info =
    _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(buffered_k, is_full_module_graph),
                                                  &created);
  assert(created, "must not initialize twice");
  return info;
}

KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
  assert(info != nullptr, "must have been initialized");
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  if (_subgraph_entry_fields == nullptr) {
    _subgraph_entry_fields =
      new (mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
}
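
// Illustrative layout (derived from the code above): _subgraph_entry_fields is
// a flat array of (static_field_offset, root_index) pairs, e.g. after two
// calls it holds:
//
//   [ offset_0, root_0, offset_1, root_1 ]
//
// The "num_entry_fields % 2 == 0" assert in
// ArchivedKlassSubGraphInfoRecord::init() below relies on this pairing.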

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in a sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);

  if (_subgraph_object_klasses == nullptr) {
    _subgraph_object_klasses =
      new (mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_k), "must be a shared class");

  if (_k == buffered_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (buffered_k->is_instance_klass()) {
    if (CDSConfig::is_dumping_invokedynamic()) {
      assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class() ||
             HeapShared::is_lambda_proxy_klass(InstanceKlass::cast(buffered_k)),
            "we can archive only instances of boot classes or lambda proxy classes");
    } else {
      assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class(),
             "must be boot class");
    }
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
    if (buffered_k->has_aot_initialized_mirror()) {
      // No need to add to the runtime-init list.
      return;
    }
    check_allowed_klass(InstanceKlass::cast(orig_k));
  } else if (buffered_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(buffered_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
            "must be boot class");
      check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
    }
    if (buffered_k == Universe::objectArrayKlass()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(buffered_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(buffered_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(buffered_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}

void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
  if (CDSConfig::is_dumping_invokedynamic()) {
    // FIXME -- this allows LambdaProxy classes
    return;
  }
  if (ik->module()->name() == vmSymbols::java_base()) {
    assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
    return;
  }

  const char* lambda_msg = "";
  if (CDSConfig::is_dumping_invokedynamic()) {
    lambda_msg = ", or a lambda proxy class";
    if (HeapShared::is_lambda_proxy_klass(ik) &&
        (ik->class_loader() == nullptr ||
         ik->class_loader() == SystemDictionary::java_platform_loader() ||
         ik->class_loader() == SystemDictionary::java_system_loader())) {
      return;
    }
  }

#ifndef PRODUCT
  if (!ik->module()->is_named() && ik->package() == nullptr && ArchiveHeapTestClass != nullptr) {
    // This class is loaded by ArchiveHeapTestClass
    return;
  }
  const char* testcls_msg = ", or a test class in an unnamed package of an unnamed module";
#else
  const char* testcls_msg = "";
#endif

  ResourceMark rm;
  log_error(cds, heap)("Class %s not allowed in archive heap. Must be in java.base%s%s",
                       ik->external_name(), lambda_msg, testcls_msg);
  MetaspaceShared::unrecoverable_writing_error();
}

bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
  if (k->is_objArray_klass()) {
    k = ObjArrayKlass::cast(k)->bottom_klass();
  }
  if (k->is_instance_klass()) {
    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
      ResourceMark rm;
      log_info(cds, heap)("non-early: %s", k->external_name());
      return true;
    } else {
      return false;
    }
  } else {
    return false;
  }
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = nullptr;
  _subgraph_object_klasses = nullptr;
  _is_full_module_graph = info->is_full_module_graph();

  if (_is_full_module_graph) {
    // Consider all classes referenced by the full module graph as early -- we will be
    // allocating objects of these classes during JVMTI early phase, so they cannot
    // be processed by (non-early) JVMTI ClassFileLoadHook
    _has_non_early_klasses = false;
  } else {
    _has_non_early_klasses = info->has_non_early_klasses();
  }

  if (_has_non_early_klasses) {
    ResourceMark rm;
    log_info(cds, heap)(
          "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
          _k->external_name());
  }

  // populate the entry fields
  GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != nullptr) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      ArchiveBuilder::new_ro_array<int>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
1453   }
1454 
1455   // the Klasses of the objects in the sub-graphs
1456   GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
1457   if (subgraph_object_klasses != nullptr) {
1458     int num_subgraphs_klasses = subgraph_object_klasses->length();
1459     _subgraph_object_klasses =
1460       ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
1461     bool is_special = (_k == ArchiveBuilder::get_buffered_klass(vmClasses::Object_klass()));
1462     for (int i = 0; i < num_subgraphs_klasses; i++) {
1463       Klass* subgraph_k = subgraph_object_klasses->at(i);
1464       if (log_is_enabled(Info, cds, heap)) {
1465         ResourceMark rm;
        const char* owner_name = is_special ? "<special>" : _k->external_name();
1470         log_info(cds, heap)(
1471           "Archived object klass %s (%2d) => %s",
1472           owner_name, i, subgraph_k->external_name());
1473       }
1474       _subgraph_object_klasses->at_put(i, subgraph_k);
1475       ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
1476     }
1477   }
1478 
1479   ArchivePtrMarker::mark_pointer(&_k);
1480   ArchivePtrMarker::mark_pointer(&_entry_field_records);
1481   ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
1482 }
1483 
1484 class HeapShared::CopyKlassSubGraphInfoToArchive : StackObj {
1485   CompactHashtableWriter* _writer;
1486 public:
1487   CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
1488 
1489   bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
1490     if (info.subgraph_object_klasses() != nullptr || info.subgraph_entry_fields() != nullptr) {
1491       ArchivedKlassSubGraphInfoRecord* record = HeapShared::archive_subgraph_info(&info);
1492       Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
1493       unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
1494       u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
1495       _writer->add(hash, delta);
1496     }
1497     return true; // keep on iterating
1498   }
1499 };
1500 
1501 ArchivedKlassSubGraphInfoRecord* HeapShared::archive_subgraph_info(KlassSubGraphInfo* info) {
1502   ArchivedKlassSubGraphInfoRecord* record =
1503       (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
1504   record->init(info);
  if (info == _dump_time_special_subgraph) {
1506     _run_time_special_subgraph = record;
1507   }
1508   return record;
1509 }
1510 
// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset and value are recorded in the sub-graph info. The value is stored
//   back into the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before the archived
//   java object sub-graphs can be accessed at runtime.
1518 void HeapShared::write_subgraph_info_table() {
1519   // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
1520   DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
1521   CompactHashtableStats stats;
1522 
1523   _run_time_subgraph_info_table.reset();
1524 
1525   CompactHashtableWriter writer(d_table->_count, &stats);
1526   CopyKlassSubGraphInfoToArchive copy(&writer);
1527   d_table->iterate(&copy);
1528   writer.dump(&_run_time_subgraph_info_table, "subgraphs");
1529 
1530 #ifndef PRODUCT
1531   if (ArchiveHeapTestClass != nullptr) {
1532     size_t len = strlen(ArchiveHeapTestClass) + 1;
1533     Array<char>* array = ArchiveBuilder::new_ro_array<char>((int)len);
1534     strncpy(array->adr_at(0), ArchiveHeapTestClass, len);
1535     _archived_ArchiveHeapTestClass = array;
1536   }
1537 #endif
1538   if (log_is_enabled(Info, cds, heap)) {
1539     print_stats();
1540   }
1541 }
1542 
1543 void HeapShared::add_root_segment(objArrayOop segment_oop) {
1544   assert(segment_oop != nullptr, "must be");
1545   assert(ArchiveHeapLoader::is_in_use(), "must be");
1546   if (_root_segments == nullptr) {
1547     _root_segments = new GrowableArrayCHeap<OopHandle, mtClassShared>(10);
1548   }
1549   _root_segments->push(OopHandle(Universe::vm_global(), segment_oop));
1550 }
1551 
1552 void HeapShared::init_root_segment_sizes(int max_size_elems) {
1553   _root_segment_max_size_elems = max_size_elems;
1554 }
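
// A sketch of the addressing scheme this implies (assuming each segment is
// filled to _root_segment_max_size_elems before the next one is added):
// root index i resolves to element (i % _root_segment_max_size_elems) of
// segment (i / _root_segment_max_size_elems).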
1555 
1556 void HeapShared::serialize_tables(SerializeClosure* soc) {
1557 
1558 #ifndef PRODUCT
1559   soc->do_ptr(&_archived_ArchiveHeapTestClass);
1560   if (soc->reading() && _archived_ArchiveHeapTestClass != nullptr) {
1561     _test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
1562     setup_test_class(_test_class_name);
1563   }
1564 #endif
1565 
1566   _run_time_subgraph_info_table.serialize_header(soc);
1567   soc->do_ptr(&_run_time_special_subgraph);
1568 }
1569 
1570 static void verify_the_heap(Klass* k, const char* which) {
1571   if (VerifyArchivedFields > 0) {
1572     ResourceMark rm;
1573     log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
1574                         which, k->external_name());
1575 
1576     VM_Verify verify_op;
1577     VMThread::execute(&verify_op);
1578 
1579     if (VerifyArchivedFields > 1 && is_init_completed()) {
1580       // At this time, the oop->klass() of some archived objects in the heap may not
1581       // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
1582       // have enough information (object size, oop maps, etc) so that a GC can be safely
1583       // performed.
1584       //
      // -XX:VerifyArchivedFields=2 forces a GC to happen in such an early stage
1586       // to check for GC safety.
1587       log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
1588                           which, k->external_name());
1589       FlagSetting fs1(VerifyBeforeGC, true);
1590       FlagSetting fs2(VerifyDuringGC, true);
1591       FlagSetting fs3(VerifyAfterGC,  true);
1592       Universe::heap()->collect(GCCause::_java_lang_system_gc);
1593     }
1594   }
1595 }
1596 
1597 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
1598 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
1599 //
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
1601 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
1602 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
1603 void HeapShared::resolve_classes(JavaThread* current) {
1604   assert(CDSConfig::is_using_archive(), "runtime only!");
1605   if (!ArchiveHeapLoader::is_in_use()) {
1606     return; // nothing to do
1607   }
1608 
1609   if (!CDSConfig::is_using_aot_linked_classes()) {
    assert(_run_time_special_subgraph != nullptr, "must be");
1611     Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
1612     if (klasses != nullptr) {
1613       for (int i = 0; i < klasses->length(); i++) {
1614         Klass* k = klasses->at(i);
1615         ExceptionMark em(current); // no exception can happen here
1616         resolve_or_init(k, /*do_init*/false, current);
1617       }
1618     }
1619   }
1620 
1621   resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
1622   resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
1623 }
1624 
1625 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1626   for (int i = 0; fields[i].valid(); i++) {
1627     ArchivableStaticFieldInfo* info = &fields[i];
1628     TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1629     InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1630     assert(k != nullptr && k->is_shared_boot_class(), "sanity");
1631     resolve_classes_for_subgraph_of(current, k);
1632   }
1633 }
1634 
1635 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1636   JavaThread* THREAD = current;
1637   ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == nullptr) {
    clear_archived_roots_of(k);
  }
1646 }
1647 
1648 void HeapShared::initialize_java_lang_invoke(TRAPS) {
1649   if (CDSConfig::is_loading_invokedynamic() || CDSConfig::is_dumping_invokedynamic()) {
1650     resolve_or_init("java/lang/invoke/Invokers$Holder", true, CHECK);
1651     resolve_or_init("java/lang/invoke/MethodHandle", true, CHECK);
1652     resolve_or_init("java/lang/invoke/MethodHandleNatives", true, CHECK);
1653     resolve_or_init("java/lang/invoke/DirectMethodHandle$Holder", true, CHECK);
1654     resolve_or_init("java/lang/invoke/DelegatingMethodHandle$Holder", true, CHECK);
1655     resolve_or_init("java/lang/invoke/LambdaForm$Holder", true, CHECK);
1656     resolve_or_init("java/lang/invoke/BoundMethodHandle$Species_L", true, CHECK);
1657   }
1658 }
1659 
1660 // Initialize the InstanceKlasses of objects that are reachable from the following roots:
1661 //   - interned strings
1662 //   - Klass::java_mirror() -- including aot-initialized mirrors such as those of Enum klasses.
1663 //   - ConstantPool::resolved_references()
1664 //   - Universe::<xxx>_exception_instance()
1665 //
1666 // For example, if this enum class is initialized at AOT cache assembly time:
1667 //
1668 //    enum Fruit {
1669 //       APPLE, ORANGE, BANANA;
1670 //       static final Set<Fruit> HAVE_SEEDS = new HashSet<>(Arrays.asList(APPLE, ORANGE));
//    }
1672 //
1673 // the aot-initialized mirror of Fruit has a static field that references HashSet, which
1674 // should be initialized before any Java code can access the Fruit class. Note that
// HashSet itself doesn't necessarily need to be an aot-initialized class.
1676 void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
1677   if (!ArchiveHeapLoader::is_in_use()) {
1678     return;
1679   }
1680 
  assert(_run_time_special_subgraph != nullptr, "must be");
1682   Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
1683   if (klasses != nullptr) {
1684     for (int pass = 0; pass < 2; pass ++) {
1685       for (int i = 0; i < klasses->length(); i++) {
1686         Klass* k = klasses->at(i);
1687         if (k->class_loader_data() == nullptr) {
1688           // This class is not yet loaded. We will initialize it in a later phase.
1689           // For example, we have loaded only AOTLinkedClassCategory::BOOT1 classes
1690           // but k is part of AOTLinkedClassCategory::BOOT2.
1691           continue;
1692         }
1693         if (k->class_loader() == class_loader()) {
1694           if (pass == 0) {
1695             if (k->is_instance_klass()) {
1696               InstanceKlass::cast(k)->link_class(CHECK);
1697             }
1698           } else {
1699             resolve_or_init(k, /*do_init*/true, CHECK);
1700           }
1701         }
1702       }
1703     }
1704   }
1705 }
1706 
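// Initialize k's archived subgraph: load/initialize the recorded klasses and
// install the archived objects into k's mirror. Java classes typically enter
// this path from their <clinit> via CDS.initializeFromArchive(); a rough
// sketch of the Java side (the field name here is hypothetical):
//
//    class ArchivedModuleGraph {
//        private static ArchivedModuleGraph archivedModuleGraph; // entry field
//        static {
//            CDS.initializeFromArchive(ArchivedModuleGraph.class);
//            // if the subgraph was usable, archivedModuleGraph is now non-null
//        }
//    }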
1707 void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
1708   JavaThread* THREAD = current;
1709   if (!ArchiveHeapLoader::is_in_use()) {
1710     return; // nothing to do
1711   }
1712 
1713   if (k->name()->equals("jdk/internal/module/ArchivedModuleGraph") &&
1714       !CDSConfig::is_using_optimized_module_handling() &&
1715       // archive was created with --module-path
1716       ClassLoaderExt::num_module_paths() > 0) {
1717     // ArchivedModuleGraph was created with a --module-path that's different than the runtime --module-path.
1718     // Thus, it might contain references to modules that do not exist at runtime. We cannot use it.
1719     log_info(cds, heap)("Skip initializing ArchivedModuleGraph subgraph: is_using_optimized_module_handling=%s num_module_paths=%d",
1720                         BOOL_TO_STR(CDSConfig::is_using_optimized_module_handling()), ClassLoaderExt::num_module_paths());
1721     return;
1722   }
1723 
1724   ExceptionMark em(THREAD);
1725   const ArchivedKlassSubGraphInfoRecord* record =
1726     resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
1727 
1728   if (HAS_PENDING_EXCEPTION) {
1729     CLEAR_PENDING_EXCEPTION;
    // None of the field values will be set if there was an exception when initializing the classes.
1731     // The java code will not see any of the archived objects in the
1732     // subgraphs referenced from k in this case.
1733     return;
1734   }
1735 
1736   if (record != nullptr) {
1737     init_archived_fields_for(k, record);
1738   }
1739 }
1740 
1741 const ArchivedKlassSubGraphInfoRecord*
1742 HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
1743   assert(!CDSConfig::is_dumping_heap(), "Should not be called when dumping heap");
1744 
1745   if (!k->is_shared()) {
1746     return nullptr;
1747   }
1748   unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
1749   const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
1750 
1751 #ifndef PRODUCT
1752   if (_test_class_name != nullptr && k->name()->equals(_test_class_name) && record != nullptr) {
1753     _test_class = k;
1754     _test_class_record = record;
1755   }
1756 #endif
1757 
1758   // Initialize from archived data. Currently this is done only
1759   // during VM initialization time. No lock is needed.
1760   if (record == nullptr) {
1761     if (log_is_enabled(Info, cds, heap)) {
1762       ResourceMark rm(THREAD);
1763       log_info(cds, heap)("subgraph %s is not recorded",
1764                           k->external_name());
1765     }
1766     return nullptr;
1767   } else {
1768     if (record->is_full_module_graph() && !CDSConfig::is_using_full_module_graph()) {
1769       if (log_is_enabled(Info, cds, heap)) {
1770         ResourceMark rm(THREAD);
1771         log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
1772                             k->external_name());
1773       }
1774       return nullptr;
1775     }
1776 
1777     if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
1778       if (log_is_enabled(Info, cds, heap)) {
1779         ResourceMark rm(THREAD);
1780         log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
1781                             k->external_name());
1782       }
1783       return nullptr;
1784     }
1785 
1786     if (log_is_enabled(Info, cds, heap)) {
1787       ResourceMark rm;
      log_info(cds, heap)("%s subgraph %s", do_init ? "init" : "resolve", k->external_name());
1789     }
1790 
1791     resolve_or_init(k, do_init, CHECK_NULL);
1792 
1793     // Load/link/initialize the klasses of the objects in the subgraph.
    // The nullptr (boot) class loader is used.
1795     Array<Klass*>* klasses = record->subgraph_object_klasses();
1796     if (klasses != nullptr) {
1797       for (int i = 0; i < klasses->length(); i++) {
1798         Klass* klass = klasses->at(i);
1799         if (!klass->is_shared()) {
1800           return nullptr;
1801         }
1802         resolve_or_init(klass, do_init, CHECK_NULL);
1803       }
1804     }
1805   }
1806 
1807   return record;
1808 }
1809 
1810 void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
  TempNewSymbol klass_name_sym = SymbolTable::new_symbol(klass_name);
1812   InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
1813   if (k == nullptr) {
1814     return;
1815   }
1816   assert(k->is_shared_boot_class(), "sanity");
1817   resolve_or_init(k, false, CHECK);
1818   if (do_init) {
1819     resolve_or_init(k, true, CHECK);
1820   }
1821 }
1822 
1823 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
1824   if (!do_init) {
1825     if (k->class_loader_data() == nullptr) {
1826       Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
1827       assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
1828     }
1829   } else {
1830     assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
1831     if (k->is_instance_klass()) {
1832       InstanceKlass* ik = InstanceKlass::cast(k);
1833       ik->initialize(CHECK);
1834     } else if (k->is_objArray_klass()) {
1835       ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1836       oak->initialize(CHECK);
1837     }
1838   }
1839 }
1840 
1841 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1842   verify_the_heap(k, "before");
1843 
1844   // Load the subgraph entry fields from the record and store them back to
1845   // the corresponding fields within the mirror.
1846   oop m = k->java_mirror();
1847   Array<int>* entry_field_records = record->entry_field_records();
1848   if (entry_field_records != nullptr) {
1849     int efr_len = entry_field_records->length();
1850     assert(efr_len % 2 == 0, "sanity");
1851     for (int i = 0; i < efr_len; i += 2) {
1852       int field_offset = entry_field_records->at(i);
1853       int root_index = entry_field_records->at(i+1);
1854       oop v = get_root(root_index, /*clear=*/true);
1855       if (k->has_aot_initialized_mirror()) {
1856         assert(v == m->obj_field(field_offset), "must be aot-initialized");
1857       } else {
1858         m->obj_field_put(field_offset, v);
1859       }
1860       log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
1861     }
1862 
1863     // Done. Java code can see the archived sub-graphs referenced from k's
1864     // mirror after this point.
1865     if (log_is_enabled(Info, cds, heap)) {
1866       ResourceMark rm;
1867       log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s%s",
1868                           k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "",
1869                           k->has_aot_initialized_mirror() ? " (aot-inited)" : "");
1870     }
1871   }
1872 
1873   verify_the_heap(k, "after ");
1874 }
1875 
1876 void HeapShared::clear_archived_roots_of(Klass* k) {
1877   unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
1878   const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
1879   if (record != nullptr) {
1880     Array<int>* entry_field_records = record->entry_field_records();
1881     if (entry_field_records != nullptr) {
1882       int efr_len = entry_field_records->length();
1883       assert(efr_len % 2 == 0, "sanity");
1884       for (int i = 0; i < efr_len; i += 2) {
1885         int root_index = entry_field_records->at(i+1);
1886         clear_root(root_index);
1887       }
1888     }
1889   }
1890 }
1891 
1892 class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
1893   int _level;
1894   bool _record_klasses_only;
1895   KlassSubGraphInfo* _subgraph_info;
1896   oop _referencing_obj;
1897 
1898   // The following are for maintaining a stack for determining
1899   // CachedOopInfo::_referrer
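  // (A sketch of the mechanism: the constructor pushes this closure onto an
  // implicit stack -- _last remembers the previous _current -- and the
  // destructor pops it. make_cached_oop_info() can then take the innermost
  // closure's _referencing_obj as the referrer of the object being recorded.)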
1900   static WalkOopAndArchiveClosure* _current;
1901   WalkOopAndArchiveClosure* _last;
1902  public:
1903   WalkOopAndArchiveClosure(int level,
1904                            bool record_klasses_only,
1905                            KlassSubGraphInfo* subgraph_info,
1906                            oop orig) :
1907     _level(level),
1908     _record_klasses_only(record_klasses_only),
1909     _subgraph_info(subgraph_info),
1910     _referencing_obj(orig) {
1911     _last = _current;
1912     _current = this;
1913   }
1914   ~WalkOopAndArchiveClosure() {
1915     _current = _last;
1916   }
1917   void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
1918   void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
1919 
1920  protected:
1921   template <class T> void do_oop_work(T *p) {
1922     oop obj = RawAccess<>::oop_load(p);
1923     if (!CompressedOops::is_null(obj)) {
1924       size_t field_delta = pointer_delta(p, _referencing_obj, sizeof(char));
1925 
1926       if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
1927         ResourceMark rm;
1928         log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level,
1929                              _referencing_obj->klass()->external_name(), field_delta,
1930                              p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
1931         if (log_is_enabled(Trace, cds, heap)) {
1932           LogTarget(Trace, cds, heap) log;
1933           LogStream out(log);
1934           obj->print_on(&out);
1935         }
1936       }
1937 
1938       bool success = HeapShared::archive_reachable_objects_from(
1939           _level + 1, _subgraph_info, obj);
1940       assert(success, "VM should have exited with unarchivable objects for _level > 1");
1941     }
1942   }
1943 
1944  public:
1945   static WalkOopAndArchiveClosure* current()  { return _current;              }
1946   oop referencing_obj()                       { return _referencing_obj;      }
1947   KlassSubGraphInfo* subgraph_info()          { return _subgraph_info;        }
1948 };
1949 
1950 WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = nullptr;
1951 
1952 // Checks if an oop has any non-null oop fields
1953 class PointsToOopsChecker : public BasicOopIterateClosure {
1954   bool _result;
1955 
1956   template <class T> void check(T *p) {
1957     _result |= (HeapAccess<>::oop_load(p) != nullptr);
1958   }
1959 
1960 public:
1961   PointsToOopsChecker() : _result(false) {}
1962   void do_oop(narrowOop *p) { check(p); }
1963   void do_oop(      oop *p) { check(p); }
1964   bool result() { return _result; }
1965 };
1966 
1967 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj) {
1968   WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
1969   oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
1970   PointsToOopsChecker points_to_oops_checker;
1971   obj->oop_iterate(&points_to_oops_checker);
1972   return CachedOopInfo(referrer, points_to_oops_checker.result());
1973 }
1974 
1975 void HeapShared::init_box_classes(TRAPS) {
1976   if (ArchiveHeapLoader::is_in_use()) {
1977     vmClasses::Boolean_klass()->initialize(CHECK);
1978     vmClasses::Character_klass()->initialize(CHECK);
1979     vmClasses::Float_klass()->initialize(CHECK);
1980     vmClasses::Double_klass()->initialize(CHECK);
1981     vmClasses::Byte_klass()->initialize(CHECK);
1982     vmClasses::Short_klass()->initialize(CHECK);
1983     vmClasses::Integer_klass()->initialize(CHECK);
1984     vmClasses::Long_klass()->initialize(CHECK);
1985     vmClasses::Void_klass()->initialize(CHECK);
1986   }
1987 }
1988 
1989 void HeapShared::exit_on_error() {
1990   if (_context != nullptr) {
1991     ResourceMark rm;
1992     LogStream ls(Log(cds, heap)::error());
1993     ls.print_cr("Context");
1994     for (int i = 0; i < _context->length(); i++) {
1995       const char* s = _context->at(i);
1996       ls.print_cr("- %s", s);
1997     }
1998   }
1999   if (_trace != nullptr) {
2000     ResourceMark rm;
2001     LogStream ls(Log(cds, heap)::error());
2002     ls.print_cr("Reference trace");
2003     for (int i = 0; i < _trace->length(); i++) {
2004       oop orig_obj = _trace->at(i);
2005       ls.print_cr("[%d] ========================================", i);
2006       orig_obj->print_on(&ls);
2007       ls.cr();
2008     }
2009   }
2010   MetaspaceShared::unrecoverable_writing_error();
2011 }
2012 
2013 // (1) If orig_obj has not been archived yet, archive it.
2014 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and of all reachable objects.
2017 bool HeapShared::archive_reachable_objects_from(int level,
2018                                                 KlassSubGraphInfo* subgraph_info,
2019                                                 oop orig_obj) {
2020   ArchivingObjectMark mark(orig_obj);
2021   assert(orig_obj != nullptr, "must be");
2022 
2023   if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
2024     // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that added
    // such objects, which are referenced (directly or indirectly) by static fields.
2027     ResourceMark rm;
2028     log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
2029     debug_trace();
2030     exit_on_error();
2031   }
2032 
2033   if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
2034     ResourceMark rm;
2035     LogTarget(Debug, cds, heap) log;
2036     LogStream out(log);
2037     out.print("Found java mirror " PTR_FORMAT " ", p2i(orig_obj));
2038     Klass* k = java_lang_Class::as_Klass(orig_obj);
2039     if (k != nullptr) {
2040       out.print("%s", k->external_name());
2041     } else {
2042       out.print("primitive");
2043     }
    out.print_cr("; scratch mirror = " PTR_FORMAT,
2045                  p2i(scratch_java_mirror(orig_obj)));
2046   }
2047 
2048   if (CDSConfig::is_initing_classes_at_dump_time()) {
2049     if (java_lang_Class::is_instance(orig_obj)) {
2050       orig_obj = scratch_java_mirror(orig_obj);
2051       assert(orig_obj != nullptr, "must be archived");
2052     }
2053   } else if (java_lang_Class::is_instance(orig_obj) && subgraph_info != _dump_time_special_subgraph) {
2054     // Without CDSConfig::is_initing_classes_at_dump_time(), we only allow archived objects to
2055     // point to the mirrors of (1) j.l.Object, (2) primitive classes, and (3) box classes. These are initialized
2056     // very early by HeapShared::init_box_classes().
2057     if (orig_obj == vmClasses::Object_klass()->java_mirror()
2058         || java_lang_Class::is_primitive(orig_obj)
2059         || orig_obj == vmClasses::Boolean_klass()->java_mirror()
2060         || orig_obj == vmClasses::Character_klass()->java_mirror()
2061         || orig_obj == vmClasses::Float_klass()->java_mirror()
2062         || orig_obj == vmClasses::Double_klass()->java_mirror()
2063         || orig_obj == vmClasses::Byte_klass()->java_mirror()
2064         || orig_obj == vmClasses::Short_klass()->java_mirror()
2065         || orig_obj == vmClasses::Integer_klass()->java_mirror()
2066         || orig_obj == vmClasses::Long_klass()->java_mirror()
2067         || orig_obj == vmClasses::Void_klass()->java_mirror()) {
2068       orig_obj = scratch_java_mirror(orig_obj);
2069       assert(orig_obj != nullptr, "must be archived");
2070     } else {
2071       // If you get an error here, you probably made a change in the JDK library that has added a Class
2072       // object that is referenced (directly or indirectly) by an ArchivableStaticFieldInfo
2073       // defined at the top of this file.
2074       log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
2075       debug_trace();
2076       MetaspaceShared::unrecoverable_writing_error();
2077     }
2078   }
2079 
2080   if (has_been_seen_during_subgraph_recording(orig_obj)) {
2081     // orig_obj has already been archived and traced. Nothing more to do.
2082     return true;
2083   } else {
2084     set_has_been_seen_during_subgraph_recording(orig_obj);
2085   }
2086 
2087   bool already_archived = has_been_archived(orig_obj);
2088   bool record_klasses_only = already_archived;
2089   if (!already_archived) {
2090     ++_num_new_archived_objs;
2091     if (!archive_object(orig_obj)) {
2092       // Skip archiving the sub-graph referenced from the current entry field.
2093       ResourceMark rm;
2094       log_error(cds, heap)(
2095         "Cannot archive the sub-graph referenced from %s object ("
2096         PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
2097         orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
2098       if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
2100         // as the Java code will take care of initializing this field dynamically.
2101         return false;
2102       } else {
2103         // We don't know how to handle an object that has been archived, but some of its reachable
2104         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
2105         // we have a real use case.
2106         exit_on_error();
2107       }
2108     }
2109   }
2110 
  Klass* orig_k = orig_obj->klass();
2112   subgraph_info->add_subgraph_object_klass(orig_k);
2113 
2114   WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj);
2115   orig_obj->oop_iterate(&walker);
2116 
2117   if (CDSConfig::is_initing_classes_at_dump_time()) {
2118     // The enum klasses are archived with aot-initialized mirror.
2119     // See AOTClassInitializer::can_archive_initialized_mirror().
2120   } else {
2121     if (CDSEnumKlass::is_enum_obj(orig_obj)) {
2122       CDSEnumKlass::handle_enum_obj(level + 1, subgraph_info, orig_obj);
2123     }
2124   }
2125 
2126   return true;
2127 }
2128 
2129 //
2130 // Start from the given static field in a java mirror and archive the
2131 // complete sub-graph of java heap objects that are reached directly
2132 // or indirectly from the starting object by following references.
2133 // Sub-graph archiving restrictions (current):
2134 //
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot classes only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. A mirror can only be the sub-graph entry object.
2139 //
2140 // The Java heap object sub-graph archiving process (see
2141 // WalkOopAndArchiveClosure):
2142 //
2143 // 1) Java object sub-graph archiving starts from a given static field
2144 // within a Class instance (java mirror). If the static field is a
2145 // reference field and points to a non-null java object, proceed to
2146 // the next step.
2147 //
// 2) Archives the referenced java object. If an archived copy of the
// current object already exists, updates the pointer in the archived
// copy of the referencing object to point to the current archived object.
// Otherwise, proceeds to the next step.
//
// 3) Follows all references within the current java object and recursively
// archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
// point to the current archived object.
2158 //
2159 // 5) The Klass of the current java object is added to the list of Klasses
2160 // for loading and initializing before any object in the archived graph can
2161 // be accessed at runtime.
2162 //
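// For example, given a hypothetical entry field
//
//    class Holder {
//        static final String[] WORDS = { "a", "b" };
//    }
//
// step 1 starts from Holder's mirror at the offset of WORDS; step 2 archives
// the String[] object; step 3 follows its elements and archives the two
// Strings; and step 5 records the String[] and String klasses, to be
// initialized before the subgraph is used at runtime.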
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass* k,
2164                                                              const char* klass_name,
2165                                                              int field_offset,
2166                                                              const char* field_name) {
2167   assert(CDSConfig::is_dumping_heap(), "dump time only");
2168   assert(k->is_shared_boot_class(), "must be boot class");
2169 
2170   oop m = k->java_mirror();
2171 
2172   KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
2173   oop f = m->obj_field(field_offset);
2174 
2175   log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));
2176 
2177   if (!CompressedOops::is_null(f)) {
2178     if (log_is_enabled(Trace, cds, heap)) {
2179       LogTarget(Trace, cds, heap) log;
2180       LogStream out(log);
2181       f->print_on(&out);
2182     }
2183 
2184     bool success = archive_reachable_objects_from(1, subgraph_info, f);
2185     if (!success) {
2186       log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
2187                            klass_name, field_name);
2188     } else {
2189       // Note: the field value is not preserved in the archived mirror.
2190       // Record the field as a new subGraph entry point. The recorded
2191       // information is restored from the archive at runtime.
2192       subgraph_info->add_subgraph_entry_field(field_offset, f);
2193       log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(f));
2194     }
2195   } else {
2196     // The field contains null, we still need to record the entry point,
2197     // so it can be restored at runtime.
2198     subgraph_info->add_subgraph_entry_field(field_offset, nullptr);
2199   }
2200 }
2201 
2202 #ifndef PRODUCT
2203 class VerifySharedOopClosure: public BasicOopIterateClosure {
2204  public:
2205   void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
2206   void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }
2207 
2208  protected:
2209   template <class T> void do_oop_work(T *p) {
2210     oop obj = RawAccess<>::oop_load(p);
2211     if (!CompressedOops::is_null(obj)) {
2212       HeapShared::verify_reachable_objects_from(obj);
2213     }
2214   }
2215 };
2216 
2217 void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
2218   assert(CDSConfig::is_dumping_heap(), "dump time only");
2219   assert(k->is_shared_boot_class(), "must be boot class");
2220 
2221   oop m = k->java_mirror();
2222   oop f = m->obj_field(field_offset);
2223   if (!CompressedOops::is_null(f)) {
2224     verify_subgraph_from(f);
2225   }
2226 }
2227 
2228 void HeapShared::verify_subgraph_from(oop orig_obj) {
2229   if (!has_been_archived(orig_obj)) {
  // It's OK for the root of a subgraph not to be archived. See comments in
2231     // archive_reachable_objects_from().
2232     return;
2233   }
2234 
2235   // Verify that all objects reachable from orig_obj are archived.
2236   init_seen_objects_table();
2237   verify_reachable_objects_from(orig_obj);
2238   delete_seen_objects_table();
2239 }
2240 
2241 void HeapShared::verify_reachable_objects_from(oop obj) {
2242   _num_total_verifications ++;
2243   if (java_lang_Class::is_instance(obj)) {
2244     obj = scratch_java_mirror(obj);
2245     assert(obj != nullptr, "must be");
2246   }
2247   if (!has_been_seen_during_subgraph_recording(obj)) {
2248     set_has_been_seen_during_subgraph_recording(obj);
2249     assert(has_been_archived(obj), "must be");
2250     VerifySharedOopClosure walker;
2251     obj->oop_iterate(&walker);
2252   }
2253 }
2254 #endif
2255 
2256 void HeapShared::check_special_subgraph_classes() {
2257   if (CDSConfig::is_initing_classes_at_dump_time()) {
2258     // We can have aot-initialized classes (such as Enums) that can reference objects
2259     // of arbitrary types. Currently, we trust the JEP 483 implementation to only
2260     // aot-initialize classes that are "safe".
2261     //
2262     // TODO: we need an automatic tool that checks the safety of aot-initialized
2263     // classes (when we extend the set of aot-initialized classes beyond JEP 483)
2264     return;
2265   } else {
2266     // In this case, the special subgraph should contain a few specific types
2267     GrowableArray<Klass*>* klasses = _dump_time_special_subgraph->subgraph_object_klasses();
2268     int num = klasses->length();
2269     for (int i = 0; i < num; i++) {
2270       Klass* subgraph_k = klasses->at(i);
2271       Symbol* name = ArchiveBuilder::current()->get_source_addr(subgraph_k->name());
2272       if (subgraph_k->is_instance_klass() &&
2273           name != vmSymbols::java_lang_Class() &&
2274           name != vmSymbols::java_lang_String() &&
2275           name != vmSymbols::java_lang_ArithmeticException() &&
2276           name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
2277           name != vmSymbols::java_lang_ArrayStoreException() &&
2278           name != vmSymbols::java_lang_ClassCastException() &&
2279           name != vmSymbols::java_lang_InternalError() &&
2280           name != vmSymbols::java_lang_NullPointerException()) {
2281         ResourceMark rm;
2282         fatal("special subgraph cannot have objects of type %s", subgraph_k->external_name());
2283       }
2284     }
2285   }
2286 }
2287 
2288 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
2289 int HeapShared::_num_new_walked_objs;
2290 int HeapShared::_num_new_archived_objs;
2291 int HeapShared::_num_old_recorded_klasses;
2292 
2293 int HeapShared::_num_total_subgraph_recordings = 0;
2294 int HeapShared::_num_total_walked_objs = 0;
2295 int HeapShared::_num_total_archived_objs = 0;
2296 int HeapShared::_num_total_recorded_klasses = 0;
2297 int HeapShared::_num_total_verifications = 0;
2298 
2299 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
2300   return _seen_objects_table->get(obj) != nullptr;
2301 }
2302 
2303 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
2304   assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
2305   _seen_objects_table->put_when_absent(obj, true);
2306   _seen_objects_table->maybe_grow();
2307   ++ _num_new_walked_objs;
2308 }
2309 
2310 void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
2311   log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
2312   init_subgraph_info(k, is_full_module_graph);
2313   init_seen_objects_table();
2314   _num_new_walked_objs = 0;
2315   _num_new_archived_objs = 0;
2316   _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
2317 }
2318 
2319 void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
2320   int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
2321     _num_old_recorded_klasses;
2322   log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
2323                       "walked %d objs, archived %d new objs, recorded %d classes",
2324                       class_name, _num_new_walked_objs, _num_new_archived_objs,
2325                       num_new_recorded_klasses);
2326 
2327   delete_seen_objects_table();
2328 
2329   _num_total_subgraph_recordings ++;
2330   _num_total_walked_objs      += _num_new_walked_objs;
2331   _num_total_archived_objs    += _num_new_archived_objs;
2332   _num_total_recorded_klasses +=  num_new_recorded_klasses;
2333 }
2334 
2335 class ArchivableStaticFieldFinder: public FieldClosure {
2336   InstanceKlass* _ik;
2337   Symbol* _field_name;
2338   bool _found;
2339   int _offset;
2340 public:
2341   ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
2342     _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}
2343 
2344   virtual void do_field(fieldDescriptor* fd) {
2345     if (fd->name() == _field_name) {
2346       assert(!_found, "fields can never be overloaded");
2347       if (is_reference_type(fd->field_type())) {
2348         _found = true;
2349         _offset = fd->offset();
2350       }
2351     }
2352   }
2353   bool found()     { return _found;  }
2354   int offset()     { return _offset; }
2355 };
2356 
2357 void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
2358                                             TRAPS) {
2359   for (int i = 0; fields[i].valid(); i++) {
2360     ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);
2363     ResourceMark rm; // for stringStream::as_string() etc.
2364 
2365 #ifndef PRODUCT
2366     bool is_test_class = (ArchiveHeapTestClass != nullptr) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
2367     const char* test_class_name = ArchiveHeapTestClass;
2368 #else
2369     bool is_test_class = false;
    const char* test_class_name = ""; // avoid C++ printf format-check warnings.
2371 #endif
2372 
2373     if (is_test_class) {
2374       log_warning(cds)("Loading ArchiveHeapTestClass %s ...", test_class_name);
2375     }
2376 
2377     Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, THREAD);
2378     if (HAS_PENDING_EXCEPTION) {
2379       CLEAR_PENDING_EXCEPTION;
2380       stringStream st;
      st.print("Failed to initialize archive heap: %s cannot be loaded by the boot loader", info->klass_name);
2382       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
2383     }
2384 
2385     if (!k->is_instance_klass()) {
2386       stringStream st;
      st.print("Failed to initialize archive heap: %s is not an instance class", info->klass_name);
2388       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
2389     }
2390 
2391     InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(), "only boot classes are supported");
2394 
2395     if (is_test_class) {
2396       if (ik->module()->is_named()) {
2397         // We don't want ArchiveHeapTestClass to be abused to easily load/initialize arbitrary
2398         // core-lib classes. You need to at least append to the bootclasspath.
2399         stringStream st;
2400         st.print("ArchiveHeapTestClass %s is not in unnamed module", test_class_name);
2401         THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
2402       }
2403 
2404       if (ik->package() != nullptr) {
2405         // This restriction makes HeapShared::is_a_test_class_in_unnamed_module() easy.
2406         stringStream st;
2407         st.print("ArchiveHeapTestClass %s is not in unnamed package", test_class_name);
2408         THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
2409       }
2410     } else {
2411       if (ik->module()->name() != vmSymbols::java_base()) {
2412         // We don't want to deal with cases when a module is unavailable at runtime.
2413         // FUTURE -- load from archived heap only when module graph has not changed
2414         //           between dump and runtime.
2415         stringStream st;
2416         st.print("%s is not in java.base module", info->klass_name);
2417         THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
2418       }
2419     }
2420 
2421     if (is_test_class) {
2422       log_warning(cds)("Initializing ArchiveHeapTestClass %s ...", test_class_name);
2423     }
2424     ik->initialize(CHECK);
2425 
2426     ArchivableStaticFieldFinder finder(ik, field_name);
2427     ik->do_local_static_fields(&finder);
2428     if (!finder.found()) {
2429       stringStream st;
2430       st.print("Unable to find the static T_OBJECT field %s::%s", info->klass_name, info->field_name);
2431       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
2432     }
2433 
2434     info->klass = ik;
2435     info->offset = finder.offset();
2436   }
2437 }
2438 
2439 void HeapShared::init_subgraph_entry_fields(TRAPS) {
2440   assert(HeapShared::can_write(), "must be");
  _dump_time_subgraph_info_table = new (mtClass) DumpTimeKlassSubGraphInfoTable();
2442   init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK);
2443   if (CDSConfig::is_dumping_full_module_graph()) {
2444     init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK);
2445   }
2446 }
2447 
2448 #ifndef PRODUCT
2449 void HeapShared::setup_test_class(const char* test_class_name) {
2450   ArchivableStaticFieldInfo* p = archive_subgraph_entry_fields;
2451   int num_slots = sizeof(archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
2452   assert(p[num_slots - 2].klass_name == nullptr, "must have empty slot that's patched below");
2453   assert(p[num_slots - 1].klass_name == nullptr, "must have empty slot that marks the end of the list");
2454 
2455   if (test_class_name != nullptr) {
2456     p[num_slots - 2].klass_name = test_class_name;
2457     p[num_slots - 2].field_name = ARCHIVE_TEST_FIELD_NAME;
2458   }
2459 }
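
// For example (a sketch; see the jtreg test referenced below), dumping with
// -XX:ArchiveHeapTestClass=MyTester patches the second-to-last slot above, so
// that the static field named ARCHIVE_TEST_FIELD_NAME in the hypothetical
// class MyTester becomes an additional subgraph entry point.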
2460 
// See if ik is one of the test classes that are pulled in by -XX:ArchiveHeapTestClass
// at runtime. This may be called before the module system is initialized, so
2463 // we cannot rely on InstanceKlass::module(), etc.
2464 bool HeapShared::is_a_test_class_in_unnamed_module(Klass* ik) {
2465   if (_test_class != nullptr) {
2466     if (ik == _test_class) {
2467       return true;
2468     }
2469     Array<Klass*>* klasses = _test_class_record->subgraph_object_klasses();
2470     if (klasses == nullptr) {
2471       return false;
2472     }
2473 
2474     for (int i = 0; i < klasses->length(); i++) {
2475       Klass* k = klasses->at(i);
2476       if (k == ik) {
2477         Symbol* name;
2478         if (k->is_instance_klass()) {
2479           name = InstanceKlass::cast(k)->name();
2480         } else if (k->is_objArray_klass()) {
2481           Klass* bk = ObjArrayKlass::cast(k)->bottom_klass();
2482           if (!bk->is_instance_klass()) {
2483             return false;
2484           }
2485           name = bk->name();
2486         } else {
2487           return false;
2488         }
2489 
2490         // See KlassSubGraphInfo::check_allowed_klass() - we only allow test classes
2491         // to be:
2492         //   (A) java.base classes (which must not be in the unnamed module)
2493         //   (B) test classes which must be in the unnamed package of the unnamed module.
2494         // So if we see a '/' character in the class name, it must be in (A);
2495         // otherwise it must be in (B).
        if (name->index_of_at(0, "/", 1) >= 0) {
2497           return false; // (A)
2498         }
2499 
2500         return true; // (B)
2501       }
2502     }
2503   }
2504 
2505   return false;
2506 }
2507 
2508 void HeapShared::initialize_test_class_from_archive(JavaThread* current) {
2509   Klass* k = _test_class;
2510   if (k != nullptr && ArchiveHeapLoader::is_in_use()) {
2511     JavaThread* THREAD = current;
2512     ExceptionMark em(THREAD);
2513     const ArchivedKlassSubGraphInfoRecord* record =
2514       resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
2515 
2516     // The _test_class is in the unnamed module, so it can't call CDS.initializeFromArchive()
2517     // from its <clinit> method. So we set up its "archivedObjects" field first, before
2518     // calling its <clinit>. This is not strictly clean, but it's a convenient way to write unit
2519     // test cases (see test/hotspot/jtreg/runtime/cds/appcds/cacheObject/ArchiveHeapTestClass.java).
2520     if (record != nullptr) {
2521       init_archived_fields_for(k, record);
2522     }
2523     resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
2524   }
2525 }
2526 #endif
2527 
2528 void HeapShared::init_for_dumping(TRAPS) {
2529   if (HeapShared::can_write()) {
2530     setup_test_class(ArchiveHeapTestClass);
    _dumped_interned_strings = new (mtClass) DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
2532     init_subgraph_entry_fields(CHECK);
2533   }
2534 }
2535 
2536 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2537                                           bool is_full_module_graph) {
2538   _num_total_subgraph_recordings = 0;
2539   _num_total_walked_objs = 0;
2540   _num_total_archived_objs = 0;
2541   _num_total_recorded_klasses = 0;
2542   _num_total_verifications = 0;
2543 
2544   // For each class X that has one or more archived fields:
2545   // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
2547   //     by any of these static fields.
2548   //     At runtime, these classes are initialized before X's archived fields
2549   //     are restored by HeapShared::initialize_from_archived_subgraph().
2550   for (int i = 0; fields[i].valid(); ) {
2551     ArchivableStaticFieldInfo* info = &fields[i];
2552     const char* klass_name = info->klass_name;
2553     start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2554 
2555     ContextMark cm(klass_name);
2556     // If you have specified consecutive fields of the same klass in
2557     // fields[], these will be archived in the same
2558     // {start_recording_subgraph ... done_recording_subgraph} pass to
2559     // save time.
2560     for (; fields[i].valid(); i++) {
2561       ArchivableStaticFieldInfo* f = &fields[i];
2562       if (f->klass_name != klass_name) {
2563         break;
2564       }
2565 
2566       ContextMark cm(f->field_name);
2567       archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2568                                                   f->offset, f->field_name);
2569     }
2570     done_recording_subgraph(info->klass, klass_name);
2571   }
2572 
2573   log_info(cds, heap)("Archived subgraph records = %d",
2574                       _num_total_subgraph_recordings);
2575   log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
2576   log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
2577   log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);
2578 
2579 #ifndef PRODUCT
2580   for (int i = 0; fields[i].valid(); i++) {
2581     ArchivableStaticFieldInfo* f = &fields[i];
2582     verify_subgraph_from_static_field(f->klass, f->offset);
2583   }
2584   log_info(cds, heap)("  Verified %d references", _num_total_verifications);
2585 #endif
2586 }
2587 
2588 // Not all the strings in the global StringTable are dumped into the archive, because
// some of those strings may only be referenced by classes that are excluded from
2590 // the archive. We need to explicitly mark the strings that are:
2591 //   [1] used by classes that WILL be archived;
2592 //   [2] included in the SharedArchiveConfigFile.
2593 void HeapShared::add_to_dumped_interned_strings(oop string) {
2594   assert_at_safepoint(); // DumpedInternedStrings uses raw oops
2595   assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
2596   bool created;
2597   _dumped_interned_strings->put_if_absent(string, true, &created);
2598   if (created) {
2599     _dumped_interned_strings->maybe_grow();
2600   }
2601 }
2602 
2603 void HeapShared::debug_trace() {
2604   ResourceMark rm;
2605   WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
2606   if (walker != nullptr) {
2607     LogStream ls(Log(cds, heap)::error());
2608     CDSHeapVerifier::trace_to_root(&ls, walker->referencing_obj());
2609   }
2610 }
2611 
2612 #ifndef PRODUCT
2613 // At dump-time, find the location of all the non-null oop pointers in an archived heap
2614 // region. This way we can quickly relocate all the pointers without using
2615 // BasicOopIterateClosure at runtime.
2616 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
2617   void* _start;
2618   BitMap *_oopmap;
2619   int _num_total_oops;
2620   int _num_null_oops;
2621  public:
2622   FindEmbeddedNonNullPointers(void* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}
2624 
2625   virtual void do_oop(narrowOop* p) {
2626     assert(UseCompressedOops, "sanity");
2627     _num_total_oops ++;
2628     narrowOop v = *p;
2629     if (!CompressedOops::is_null(v)) {
2630       size_t idx = p - (narrowOop*)_start;
2631       _oopmap->set_bit(idx);
2632     } else {
2633       _num_null_oops ++;
2634     }
2635   }
2636   virtual void do_oop(oop* p) {
2637     assert(!UseCompressedOops, "sanity");
2638     _num_total_oops ++;
2639     if ((*p) != nullptr) {
2640       size_t idx = p - (oop*)_start;
2641       _oopmap->set_bit(idx);
2642     } else {
2643       _num_null_oops ++;
2644     }
2645   }
2646   int num_total_oops() const { return _num_total_oops; }
2647   int num_null_oops()  const { return _num_null_oops; }
2648 };
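
// Schematic example: if an archived region has non-null oops at slot indices
// 3 and 7 (relative to _start, in narrowOop or oop units), bits 3 and 7 of the
// oopmap are set, and the runtime relocation code visits only those two slots.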
2649 #endif
2650 
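// For example, count_allocation(6) falls into the i == 3 bucket (6 <= 2^3 = 8
// words), updating _alloc_count[3] and _alloc_size[3]; objects bigger than
// 2^(ALLOC_STAT_SLOTS - 1) words match no bucket and are reported as "huge"
// objects by print_stats().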
2651 void HeapShared::count_allocation(size_t size) {
2652   _total_obj_count ++;
2653   _total_obj_size += size;
2654   for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
2655     if (size <= (size_t(1) << i)) {
2656       _alloc_count[i] ++;
2657       _alloc_size[i] += size;
2658       return;
2659     }
2660   }
2661 }
2662 
2663 static double avg_size(size_t size, size_t count) {
2664   double avg = 0;
2665   if (count > 0) {
2666     avg = double(size * HeapWordSize) / double(count);
2667   }
2668   return avg;
2669 }
2670 
2671 void HeapShared::print_stats() {
2672   size_t huge_count = _total_obj_count;
2673   size_t huge_size = _total_obj_size;
2674 
2675   for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
2676     size_t byte_size_limit = (size_t(1) << i) * HeapWordSize;
2677     size_t count = _alloc_count[i];
2678     size_t size = _alloc_size[i];
2679     log_info(cds, heap)(SIZE_FORMAT_W(8) " objects are <= " SIZE_FORMAT_W(-6)
2680                         " bytes (total " SIZE_FORMAT_W(8) " bytes, avg %8.1f bytes)",
2681                         count, byte_size_limit, size * HeapWordSize, avg_size(size, count));
2682     huge_count -= count;
2683     huge_size -= size;
2684   }
2685 
2686   log_info(cds, heap)(SIZE_FORMAT_W(8) " huge  objects               (total "  SIZE_FORMAT_W(8) " bytes"
2687                       ", avg %8.1f bytes)",
2688                       huge_count, huge_size * HeapWordSize,
2689                       avg_size(huge_size, huge_count));
2690   log_info(cds, heap)(SIZE_FORMAT_W(8) " total objects               (total "  SIZE_FORMAT_W(8) " bytes"
2691                       ", avg %8.1f bytes)",
2692                       _total_obj_count, _total_obj_size * HeapWordSize,
2693                       avg_size(_total_obj_size, _total_obj_count));
2694 }
2695 
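// Returns true iff ARCHIVED_BOOT_LAYER_CLASS can be found and its static field
// named by ARCHIVED_BOOT_LAYER_FIELD (of type jdk/internal/module/ArchivedBootLayer)
// holds a non-null value, i.e., an archived boot layer was saved at dump time.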
2696 bool HeapShared::is_archived_boot_layer_available(JavaThread* current) {
2697   TempNewSymbol klass_name = SymbolTable::new_symbol(ARCHIVED_BOOT_LAYER_CLASS);
2698   InstanceKlass* k = SystemDictionary::find_instance_klass(current, klass_name, Handle(), Handle());
2699   if (k == nullptr) {
2700     return false;
2701   } else {
2702     TempNewSymbol field_name = SymbolTable::new_symbol(ARCHIVED_BOOT_LAYER_FIELD);
2703     TempNewSymbol field_signature = SymbolTable::new_symbol("Ljdk/internal/module/ArchivedBootLayer;");
2704     fieldDescriptor fd;
2705     if (k->find_field(field_name, field_signature, true, &fd) != nullptr) {
2706       oop m = k->java_mirror();
2707       oop f = m->obj_field(fd.offset());
2708       if (CompressedOops::is_null(f)) {
2709         return false;
2710       }
2711     } else {
2712       return false;
2713     }
2714   }
2715   return true;
2716 }
2717 
2718 #endif // INCLUDE_CDS_JAVA_HEAP