1 /*
   2  * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "cds/archiveBuilder.hpp"
  27 #include "cds/archiveUtils.hpp"
  28 #include "cds/filemap.hpp"
  29 #include "cds/heapShared.inline.hpp"
  30 #include "cds/metaspaceShared.hpp"
  31 #include "classfile/classLoaderData.hpp"
  32 #include "classfile/classLoaderDataShared.hpp"
  33 #include "classfile/javaClasses.inline.hpp"
  34 #include "classfile/moduleEntry.hpp"
  35 #include "classfile/stringTable.hpp"
  36 #include "classfile/symbolTable.hpp"
  37 #include "classfile/systemDictionary.hpp"
  38 #include "classfile/systemDictionaryShared.hpp"
  39 #include "classfile/vmClasses.hpp"
  40 #include "classfile/vmSymbols.hpp"
  41 #include "gc/shared/collectedHeap.hpp"
  42 #include "gc/shared/gcLocker.hpp"
  43 #include "gc/shared/gcVMOperations.hpp"
  44 #include "logging/log.hpp"
  45 #include "logging/logMessage.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "memory/iterator.inline.hpp"
  48 #include "memory/metadataFactory.hpp"
  49 #include "memory/metaspaceClosure.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/compressedOops.inline.hpp"
  53 #include "oops/fieldStreams.inline.hpp"
  54 #include "oops/objArrayOop.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "prims/jvmtiExport.hpp"
  57 #include "runtime/fieldDescriptor.inline.hpp"
  58 #include "runtime/globals_extension.hpp"
  59 #include "runtime/init.hpp"
  60 #include "runtime/java.hpp"
  61 #include "runtime/javaCalls.hpp"
  62 #include "runtime/safepoint.hpp"
  63 #include "runtime/safepointVerifiers.hpp"
  64 #include "utilities/bitMap.inline.hpp"
  65 #include "utilities/copy.hpp"
  66 #if INCLUDE_G1GC
  67 #include "gc/g1/g1CollectedHeap.hpp"
  68 #endif
  69 
  70 #if INCLUDE_CDS_JAVA_HEAP
  71 
  72 bool HeapShared::_closed_regions_mapped = false;
  73 bool HeapShared::_open_regions_mapped = false;
  74 bool HeapShared::_is_loaded = false;
  75 address   HeapShared::_narrow_oop_base;
  76 int       HeapShared::_narrow_oop_shift;
  77 DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
  78 
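     // These fields support the "loaded" archive heap mode, used when the archived heap
     // regions cannot be mapped directly and are instead copied into the runtime Java heap.
     // Roughly: _loaded_heap_bottom/_loaded_heap_top bound the copied range,
     // _dumptime_base_0..3 record the dump-time start addresses of the (up to four)
     // archived heap regions (with _dumptime_top marking their end), and
     // _runtime_offset_0..3 give the per-region displacement applied when decoding archived
     // oops after such a copy. See heapShared.hpp for the authoritative declarations.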
  79 uintptr_t HeapShared::_loaded_heap_bottom = 0;
  80 uintptr_t HeapShared::_loaded_heap_top = 0;
  81 uintptr_t HeapShared::_dumptime_base_0 = UINTPTR_MAX;
  82 uintptr_t HeapShared::_dumptime_base_1 = UINTPTR_MAX;
  83 uintptr_t HeapShared::_dumptime_base_2 = UINTPTR_MAX;
  84 uintptr_t HeapShared::_dumptime_base_3 = UINTPTR_MAX;
  85 uintptr_t HeapShared::_dumptime_top    = 0;
  86 intx HeapShared::_runtime_offset_0 = 0;
  87 intx HeapShared::_runtime_offset_1 = 0;
  88 intx HeapShared::_runtime_offset_2 = 0;
  89 intx HeapShared::_runtime_offset_3 = 0;
  90 bool HeapShared::_loading_failed = false;
  91 //
  92 // If you add new entries to the following tables, you should know what you're doing!
  93 //
  94 
  95 // Entry fields for shareable subgraphs archived in the closed archive heap
  96 // region. Warning: Objects in the subgraphs should not have reference fields
  97 // assigned at runtime.
  98 static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  99   {"java/lang/Integer$IntegerCache",              "archivedCache"},
 100   {"java/lang/Long$LongCache",                    "archivedCache"},
 101   {"java/lang/Byte$ByteCache",                    "archivedCache"},
 102   {"java/lang/Short$ShortCache",                  "archivedCache"},
 103   {"java/lang/Character$CharacterCache",          "archivedCache"},
 104   {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
 105   {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
 106 };
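     // A simplified sketch of the Java-side pattern that consumes these entries (the real
     // code lives in the classes listed above, e.g. java.lang.Integer$IntegerCache, and
     // differs in detail):
     //
     //     private static Integer[] archivedCache;            // filled in from the archive
     //     static {
     //         CDS.initializeFromArchive(IntegerCache.class);  // jdk.internal.misc.CDS
     //         if (archivedCache == null) {
     //             archivedCache = ...;                        // fall back to computing it
     //         }
     //     }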
 107 // Entry fields for subgraphs archived in the open archive heap region.
 108 static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
 109   {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
 110   {"java/util/ImmutableCollections",              "archivedObjects"},
 111   {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
 112   {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
 113   {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
 114 };
 115 
 116 // Entry fields for subgraphs archived in the open archive heap region (full module graph).
 117 static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
 118   {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
 119   {"jdk/internal/module/ArchivedBootLayer",       "archivedBootLayer"},
 120   {"java/lang/Module$ArchivedData",               "archivedData"},
 121 };
 122 
 123 const static int num_closed_archive_subgraph_entry_fields =
 124   sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
 125 const static int num_open_archive_subgraph_entry_fields =
 126   sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
 127 const static int num_fmg_open_archive_subgraph_entry_fields =
 128   sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
 129 
 130 GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL;
 131 narrowOop HeapShared::_roots_narrow;
 132 OopHandle HeapShared::_roots;
 133 
 134 #ifdef ASSERT
 135 bool HeapShared::is_archived_object_during_dumptime(oop p) {
 136   assert(HeapShared::can_write(), "must be");
 137   assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
 138   return Universe::heap()->is_archived_object(p);
 139 }
 140 #endif
 141 
 142 ////////////////////////////////////////////////////////////////
 143 //
 144 // Java heap object archiving support
 145 //
 146 ////////////////////////////////////////////////////////////////
 147 void HeapShared::fixup_regions() {
 148   FileMapInfo* mapinfo = FileMapInfo::current_info();
 149   if (is_mapped()) {
 150     mapinfo->fixup_mapped_heap_regions();
 151   } else if (_loading_failed) {
 152     fill_failed_loaded_region();
 153   }
 154   if (is_fully_available()) {
 155     _roots = OopHandle(Universe::vm_global(), decode_from_archive(_roots_narrow));
 156     if (!MetaspaceShared::use_full_module_graph()) {
 157       // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
 158       ClassLoaderDataShared::clear_archived_oops();
 159     }
 160   }
 161   SystemDictionaryShared::update_archived_mirror_native_pointers();
 162 }
 163 
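     // Hash function for the dump-time tables that are keyed by oop (e.g. the
     // ArchivedObjectCache and SeenObjectsTable below). identity_hash() is stored in the
     // object header once computed, so the hash stays stable even if the object later moves.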
 164 unsigned HeapShared::oop_hash(oop const& p) {
 165   unsigned hash = (unsigned)p->identity_hash();
 166   return hash;
 167 }
 168 
 169 static void reset_states(oop obj, TRAPS) {
 170   Handle h_obj(THREAD, obj);
 171   InstanceKlass* klass = InstanceKlass::cast(obj->klass());
 172   TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
 173   Symbol* method_sig = vmSymbols::void_method_signature();
 174 
 175   while (klass != NULL) {
 176     Method* method = klass->find_method(method_name, method_sig);
 177     if (method != NULL) {
 178       assert(method->is_private(), "must be");
 179       if (log_is_enabled(Debug, cds)) {
 180         ResourceMark rm(THREAD);
 181         log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
 182       }
 183       JavaValue result(T_VOID);
 184       JavaCalls::call_special(&result, h_obj, klass,
 185                               method_name, method_sig, CHECK);
 186     }
 187     klass = klass->java_super();
 188   }
 189 }
 190 
 191 void HeapShared::reset_archived_object_states(TRAPS) {
 192   assert(DumpSharedSpaces, "dump-time only");
 193   log_debug(cds)("Resetting platform loader");
 194   reset_states(SystemDictionary::java_platform_loader(), CHECK);
 195   log_debug(cds)("Resetting system loader");
 196   reset_states(SystemDictionary::java_system_loader(), CHECK);
 197 }
 198 
 199 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
 200 oop HeapShared::find_archived_heap_object(oop obj) {
 201   assert(DumpSharedSpaces, "dump-time only");
 202   ArchivedObjectCache* cache = archived_object_cache();
 203   oop* p = cache->get(obj);
 204   if (p != NULL) {
 205     return *p;
 206   } else {
 207     return NULL;
 208   }
 209 }
 210 
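     // Root handling in a nutshell: at dump time, append_root() collects the roots of all
     // archived subgraphs into _pending_roots, and copy_roots() later materializes them into
     // an objArray in the open archive region. At run time, the mapped archive supplies the
     // narrowOop location of that array via set_roots(), and fixup_regions() resolves it into
     // the _roots OopHandle that roots()/get_root()/clear_root() operate on.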
 211 int HeapShared::append_root(oop obj) {
 212   assert(DumpSharedSpaces, "dump-time only");
 213 
 214   // No GC should happen here, because _pending_roots holds raw oops that are not scanned (or updated) by GC.
 215   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 216 
 217   if (_pending_roots == NULL) {
 218     _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
 219   }
 220 
 221   return _pending_roots->append(obj);
 222 }
 223 
 224 objArrayOop HeapShared::roots() {
 225   if (DumpSharedSpaces) {
 226     assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 227     if (!HeapShared::can_write()) {
 228       return NULL;
 229     }
 230   } else {
 231     assert(UseSharedSpaces, "must be");
 232   }
 233 
 234   objArrayOop roots = (objArrayOop)_roots.resolve();
 235   assert(roots != NULL, "should have been initialized");
 236   return roots;
 237 }
 238 
 239 void HeapShared::set_roots(narrowOop roots) {
 240   assert(UseSharedSpaces, "runtime only");
 241   assert(is_fully_available(), "must be");
 242   _roots_narrow = roots;
 243 }
 244 
 245 // Returns the root object at the given index. If 'clear' is true, the root slot is cleared afterwards.
 246 oop HeapShared::get_root(int index, bool clear) {
 247   assert(index >= 0, "sanity");
 248   if (DumpSharedSpaces) {
 249     assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 250     assert(_pending_roots != NULL, "sanity");
 251     return _pending_roots->at(index);
 252   } else {
 253     assert(UseSharedSpaces, "must be");
 254     assert(!_roots.is_empty(), "must have loaded shared heap");
 255     oop result = roots()->obj_at(index);
 256     if (clear) {
 257       clear_root(index);
 258     }
 259     return result;
 260   }
 261 }
 262 
 263 void HeapShared::clear_root(int index) {
 264   assert(index >= 0, "sanity");
 265   assert(UseSharedSpaces, "must be");
 266   if (is_fully_available()) {
 267     if (log_is_enabled(Debug, cds, heap)) {
 268       oop old = roots()->obj_at(index);
 269       log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
 270     }
 271     roots()->obj_at_put(index, NULL);
 272   }
 273 }
 274 
 275 oop HeapShared::archive_object(oop obj) {
 276   assert(DumpSharedSpaces, "dump-time only");
 277 
 278   oop ao = find_archived_heap_object(obj);
 279   if (ao != NULL) {
 280     // already archived
 281     return ao;
 282   }
 283 
 284   int len = obj->size();
 285   if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
 286     log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
 287                          p2i(obj), (size_t)obj->size());
 288     return NULL;
 289   }
 290 
 291   oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
 292   if (archived_oop != NULL) {
 293     Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
 294     // Reinitialize markword to remove age/marking/locking/etc.
 295     //
 296     // We need to retain the identity_hash, because it may have been used by some hashtables
 297     // in the shared heap. This also has the side effect of pre-initializing the
 298     // identity_hash for all shared objects, so they are less likely to be written
 299     // into during run time, increasing the potential of memory sharing.
 300     int hash_original = obj->identity_hash();
 301 
 302     assert(SafepointSynchronize::is_at_safepoint(), "resolving displaced headers only at safepoint");
 303     markWord mark = obj->mark();
 304     if (mark.has_displaced_mark_helper()) {
 305       mark = mark.displaced_mark_helper();
 306     }
 307     narrowKlass nklass = mark.narrow_klass();
 308     archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original) LP64_ONLY(.set_narrow_klass(nklass)));
 309     assert(archived_oop->mark().is_unlocked(), "sanity");
 310 
 311     DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
 312     assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);
 313 
 314     ArchivedObjectCache* cache = archived_object_cache();
 315     cache->put(obj, archived_oop);
 316     if (log_is_enabled(Debug, cds, heap)) {
 317       ResourceMark rm;
 318       log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
 319                            p2i(obj), p2i(archived_oop), obj->klass()->external_name());
 320     }
 321   } else {
 322     log_error(cds, heap)(
 323       "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
 324       p2i(obj));
 325     vm_direct_exit(-1,
 326       err_msg("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
 327               SIZE_FORMAT "M", MaxHeapSize/M));
 328   }
 329   return archived_oop;
 330 }
 331 
 332 void HeapShared::archive_klass_objects() {
 333   GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
 334   assert(klasses != NULL, "sanity");
 335   for (int i = 0; i < klasses->length(); i++) {
 336     Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));
 337 
 338     // archive mirror object
 339     java_lang_Class::archive_mirror(k);
 340 
 341     // archive the resolved_references array
 342     if (k->is_instance_klass()) {
 343       InstanceKlass* ik = InstanceKlass::cast(k);
 344       ik->constants()->archive_resolved_references();
 345     }
 346   }
 347 }
 348 
 349 void HeapShared::run_full_gc_in_vm_thread() {
 350   if (HeapShared::can_write()) {
 351     // Avoid fragmentation while archiving heap objects.
 352     // We do this inside a safepoint, so that no further allocation can happen after GC
 353     // has finished.
 354     if (GCLocker::is_active()) {
 355       // Just checking for safety ...
 356       // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
 357       // has been modified such that JNI code is executed in some clean up threads after
 358       // we have finished class loading.
 359       log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
 360     } else {
 361       log_info(cds)("Run GC ...");
 362       Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
 363       log_info(cds)("Run GC done");
 364     }
 365   }
 366 }
 367 
 368 void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
 369                                  GrowableArray<MemRegion>* open_regions) {
 370 
 371   G1HeapVerifier::verify_ready_for_archiving();
 372 
 373   {
 374     NoSafepointVerifier nsv;
 375 
 376     // Cache for recording where the archived objects are copied to
 377     create_archived_object_cache();
 378 
 379     log_info(cds)("Heap range = [" PTR_FORMAT " - "  PTR_FORMAT "]",
 380                   p2i(CompressedOops::begin()), p2i(CompressedOops::end()));
 381     log_info(cds)("Dumping objects to closed archive heap region ...");
 382     copy_closed_objects(closed_regions);
 383 
 384     log_info(cds)("Dumping objects to open archive heap region ...");
 385     copy_open_objects(open_regions);
 386 
 387     destroy_archived_object_cache();
 388   }
 389 
 390   G1HeapVerifier::verify_archive_regions();
 391 }
 392 
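     // Note on the two kinds of regions: "closed" regions hold objects that are never
     // modified at run time and only reference other objects inside the closed regions
     // (e.g. interned strings and the boxing caches), so they can be mapped read-only and
     // shared between processes. "Open" regions hold objects that may be modified or may
     // reference objects outside the archive (e.g. mirrors and the module graph), so they
     // are mapped read-write.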
 393 void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) {
 394   assert(HeapShared::can_write(), "must be");
 395 
 396   G1CollectedHeap::heap()->begin_archive_alloc_range();
 397 
 398   // Archive interned string objects
 399   StringTable::write_to_archive(_dumped_interned_strings);
 400 
 401   archive_object_subgraphs(closed_archive_subgraph_entry_fields,
 402                            num_closed_archive_subgraph_entry_fields,
 403                            true /* is_closed_archive */,
 404                            false /* is_full_module_graph */);
 405 
 406   G1CollectedHeap::heap()->end_archive_alloc_range(closed_regions,
 407                                                    os::vm_allocation_granularity());
 408 }
 409 
 410 void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) {
 411   assert(HeapShared::can_write(), "must be");
 412 
 413   G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
 414 
 415   java_lang_Class::archive_basic_type_mirrors();
 416 
 417   archive_klass_objects();
 418 
 419   archive_object_subgraphs(open_archive_subgraph_entry_fields,
 420                            num_open_archive_subgraph_entry_fields,
 421                            false /* is_closed_archive */,
 422                            false /* is_full_module_graph */);
 423   if (MetaspaceShared::use_full_module_graph()) {
 424     archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
 425                              num_fmg_open_archive_subgraph_entry_fields,
 426                              false /* is_closed_archive */,
 427                              true /* is_full_module_graph */);
 428     ClassLoaderDataShared::init_archived_oops();
 429   }
 430 
 431   copy_roots();
 432 
 433   G1CollectedHeap::heap()->end_archive_alloc_range(open_regions,
 434                                                    os::vm_allocation_granularity());
 435 }
 436 
 437 // Copy _pending_roots into an objArray allocated in the open archive heap region
 438 void HeapShared::copy_roots() {
 439   int length = _pending_roots != NULL ? _pending_roots->length() : 0;
 440   size_t size = objArrayOopDesc::object_size(length);
 441   Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
 442   HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);
 443 
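       // 'size' is in HeapWords (objArrayOopDesc::object_size() returns a word count),
       // hence the BytesPerWord scaling for memset below.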
 444   memset(mem, 0, size * BytesPerWord);
 445   {
 446     // This is copied from MemAllocator::finish
 447     oopDesc::set_mark(mem, k->prototype_header());
 448     oopDesc::release_set_klass(mem, k);
 449   }
 450   {
 451     // This is copied from ObjArrayAllocator::initialize
 452     arrayOopDesc::set_length(mem, length);
 453   }
 454 
 455   _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
 456   for (int i = 0; i < length; i++) {
 457     roots()->obj_at_put(i, _pending_roots->at(i));
 458   }
 459   log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem);
 460 }
 461 
 462 void HeapShared::init_narrow_oop_decoding(address base, int shift) {
 463   _narrow_oop_base = base;
 464   _narrow_oop_shift = shift;
 465 }
 466 
 467 //
 468 // Subgraph archiving support
 469 //
 470 HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
 471 HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;
 472 
 473 // Get the subgraph_info for Klass k. A new subgraph_info is created if
 474 // there is no existing one for k. The subgraph_info records the relocated
 475 // Klass* of the original k.
 476 KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
 477   assert(DumpSharedSpaces, "dump time only");
 478   bool created;
 479   Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
 480   KlassSubGraphInfo* info =
 481     _dump_time_subgraph_info_table->put_if_absent(relocated_k, KlassSubGraphInfo(relocated_k, is_full_module_graph),
 482                                                   &created);
 483   assert(created, "must not initialize twice");
 484   return info;
 485 }
 486 
 487 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
 488   assert(DumpSharedSpaces, "dump time only");
 489   Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
 490   KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
 491   assert(info != NULL, "must have been initialized");
 492   return info;
 493 }
 494 
 495 // Add an entry field to the current KlassSubGraphInfo.
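     // Each entry is recorded as a (static_field_offset, root_index) pair appended to
     // _subgraph_entry_fields; ArchivedKlassSubGraphInfoRecord::init() copies the pairs into
     // the archive and HeapShared::init_archived_fields_for() replays them at run time.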
 496 void KlassSubGraphInfo::add_subgraph_entry_field(
 497       int static_field_offset, oop v, bool is_closed_archive) {
 498   assert(DumpSharedSpaces, "dump time only");
 499   if (_subgraph_entry_fields == NULL) {
 500     _subgraph_entry_fields =
 501       new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
 502   }
 503   _subgraph_entry_fields->append(static_field_offset);
 504   _subgraph_entry_fields->append(HeapShared::append_root(v));
 505 }
 506 
 507 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
 508 // Only objects of boot classes can be included in the sub-graph.
 509 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
 510   assert(DumpSharedSpaces, "dump time only");
 511   Klass* relocated_k = ArchiveBuilder::get_relocated_klass(orig_k);
 512 
 513   if (_subgraph_object_klasses == NULL) {
 514     _subgraph_object_klasses =
 515       new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
 516   }
 517 
 518   assert(ArchiveBuilder::current()->is_in_buffer_space(relocated_k), "must be a shared class");
 519 
 520   if (_k == relocated_k) {
 521     // Don't add the Klass containing the sub-graph to its own klass
 522     // initialization list.
 523     return;
 524   }
 525 
 526   if (relocated_k->is_instance_klass()) {
 527     assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
 528           "must be boot class");
 529     // vmClasses::xxx_klass() are not updated, need to check
 530     // the original Klass*
 531     if (orig_k == vmClasses::String_klass() ||
 532         orig_k == vmClasses::Object_klass()) {
 533       // Initialized early during VM initialization. No need to be added
 534       // to the sub-graph object class list.
 535       return;
 536     }
 537   } else if (relocated_k->is_objArray_klass()) {
 538     Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
 539     if (abk->is_instance_klass()) {
 540       assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
 541             "must be boot class");
 542     }
 543     if (relocated_k == Universe::objectArrayKlassObj()) {
 544       // Initialized early during Universe::genesis. No need to be added
 545       // to the list.
 546       return;
 547     }
 548   } else {
 549     assert(relocated_k->is_typeArray_klass(), "must be");
 550     // Primitive type arrays are created early during Universe::genesis.
 551     return;
 552   }
 553 
 554   if (log_is_enabled(Debug, cds, heap)) {
 555     if (!_subgraph_object_klasses->contains(relocated_k)) {
 556       ResourceMark rm;
 557       log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
 558     }
 559   }
 560 
 561   _subgraph_object_klasses->append_if_missing(relocated_k);
 562   _has_non_early_klasses |= is_non_early_klass(orig_k);
 563 }
 564 
 565 bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
 566   if (k->is_objArray_klass()) {
 567     k = ObjArrayKlass::cast(k)->bottom_klass();
 568   }
 569   if (k->is_instance_klass()) {
 570     if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
 571       ResourceMark rm;
 572       log_info(cds, heap)("non-early: %s", k->external_name());
 573       return true;
 574     } else {
 575       return false;
 576     }
 577   } else {
 578     return false;
 579   }
 580 }
 581 
 582 // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
 583 void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
 584   _k = info->klass();
 585   _entry_field_records = NULL;
 586   _subgraph_object_klasses = NULL;
 587   _is_full_module_graph = info->is_full_module_graph();
 588 
 589   if (_is_full_module_graph) {
 590     // Consider all classes referenced by the full module graph as early -- we will be
 591     // allocating objects of these classes during JVMTI early phase, so they cannot
 592     // be processed by (non-early) JVMTI ClassFileLoadHook
 593     _has_non_early_klasses = false;
 594   } else {
 595     _has_non_early_klasses = info->has_non_early_klasses();
 596   }
 597 
 598   if (_has_non_early_klasses) {
 599     ResourceMark rm;
 600     log_info(cds, heap)(
 601           "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
 602           _k->external_name());
 603   }
 604 
 605   // populate the entry fields
 606   GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
 607   if (entry_fields != NULL) {
 608     int num_entry_fields = entry_fields->length();
 609     assert(num_entry_fields % 2 == 0, "sanity");
 610     _entry_field_records =
 611       ArchiveBuilder::new_ro_array<int>(num_entry_fields);
 612     for (int i = 0 ; i < num_entry_fields; i++) {
 613       _entry_field_records->at_put(i, entry_fields->at(i));
 614     }
 615   }
 616 
 617   // the Klasses of the objects in the sub-graphs
 618   GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
 619   if (subgraph_object_klasses != NULL) {
 620     int num_subgraphs_klasses = subgraph_object_klasses->length();
 621     _subgraph_object_klasses =
 622       ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
 623     for (int i = 0; i < num_subgraphs_klasses; i++) {
 624       Klass* subgraph_k = subgraph_object_klasses->at(i);
 625       if (log_is_enabled(Info, cds, heap)) {
 626         ResourceMark rm;
 627         log_info(cds, heap)(
 628           "Archived object klass %s (%2d) => %s",
 629           _k->external_name(), i, subgraph_k->external_name());
 630       }
 631       _subgraph_object_klasses->at_put(i, subgraph_k);
 632       ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
 633     }
 634   }
 635 
 636   ArchivePtrMarker::mark_pointer(&_k);
 637   ArchivePtrMarker::mark_pointer(&_entry_field_records);
 638   ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
 639 }
 640 
 641 struct CopyKlassSubGraphInfoToArchive : StackObj {
 642   CompactHashtableWriter* _writer;
 643   CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
 644 
 645   bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
 646     if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
 647       ArchivedKlassSubGraphInfoRecord* record =
 648         (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
 649       record->init(&info);
 650 
 651       unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)klass);
 652       u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
 653       _writer->add(hash, delta);
 654     }
 655     return true; // keep on iterating
 656   }
 657 };
 658 
 659 // Build the records of archived subgraph infos, which include:
 660 // - Entry points to all subgraphs from the containing class mirror. The entry
 661 //   points are static fields in the mirror. For each entry point, the field
 662 //   offset, value and is_closed_archive flag are recorded in the sub-graph
 663 //   info. The value is stored back to the corresponding field at runtime.
 664 // - A list of klasses that need to be loaded/initialized before the archived
 665 //   java object sub-graph can be accessed at runtime.
 666 void HeapShared::write_subgraph_info_table() {
 667   // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
 668   DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
 669   CompactHashtableStats stats;
 670 
 671   _run_time_subgraph_info_table.reset();
 672 
 673   CompactHashtableWriter writer(d_table->_count, &stats);
 674   CopyKlassSubGraphInfoToArchive copy(&writer);
 675   d_table->iterate(&copy);
 676 
 677   writer.dump(&_run_time_subgraph_info_table, "subgraphs");
 678 }
 679 
 680 void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
 681   _run_time_subgraph_info_table.serialize_header(soc);
 682 }
 683 
 684 static void verify_the_heap(Klass* k, const char* which) {
 685   if (VerifyArchivedFields > 0) {
 686     ResourceMark rm;
 687     log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
 688                         which, k->external_name());
 689 
 690     VM_Verify verify_op;
 691     VMThread::execute(&verify_op);
 692 
 693     if (VerifyArchivedFields > 1 && is_init_completed()) {
 694       // At this time, the oop->klass() of some archived objects in the heap may not
 695       // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
 696       // have enough information (object size, oop maps, etc) so that a GC can be safely
 697       // performed.
 698       //
 699       // -XX:VerifyArchivedFields=2 forces a GC to happen at such an early stage
 700       // to check for GC safety.
 701       log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
 702                           which, k->external_name());
 703       FlagSetting fs1(VerifyBeforeGC, true);
 704       FlagSetting fs2(VerifyDuringGC, true);
 705       FlagSetting fs3(VerifyAfterGC,  true);
 706       Universe::heap()->collect(GCCause::_java_lang_system_gc);
 707     }
 708   }
 709 }
 710 
 711 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
 712 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
 713 //
 714 // Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
 715 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
 716 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
 717 void HeapShared::resolve_classes(JavaThread* THREAD) {
 718   if (!is_fully_available()) {
 719     return; // nothing to do
 720   }
 721   resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields,
 722                                 num_closed_archive_subgraph_entry_fields,
 723                                 THREAD);
 724   resolve_classes_for_subgraphs(open_archive_subgraph_entry_fields,
 725                                 num_open_archive_subgraph_entry_fields,
 726                                 THREAD);
 727   resolve_classes_for_subgraphs(fmg_open_archive_subgraph_entry_fields,
 728                                 num_fmg_open_archive_subgraph_entry_fields,
 729                                 THREAD);
 730 }
 731 
 732 void HeapShared::resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[],
 733                                                int num, JavaThread* THREAD) {
 734   for (int i = 0; i < num; i++) {
 735     ArchivableStaticFieldInfo* info = &fields[i];
 736     TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
 737     InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
 738     assert(k != NULL && k->is_shared_boot_class(), "sanity");
 739     resolve_classes_for_subgraph_of(k, THREAD);
 740   }
 741 }
 742 
 743 void HeapShared::resolve_classes_for_subgraph_of(Klass* k, JavaThread* THREAD) {
 744   ExceptionMark em(THREAD);
 745   const ArchivedKlassSubGraphInfoRecord* record =
 746    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
 747   if (HAS_PENDING_EXCEPTION) {
 748    CLEAR_PENDING_EXCEPTION;
 749   }
 750   if (record == NULL) {
 751    clear_archived_roots_of(k);
 752   }
 753 }
 754 
 755 void HeapShared::initialize_from_archived_subgraph(Klass* k, JavaThread* THREAD) {
 756   if (!is_fully_available()) {
 757     return; // nothing to do
 758   }
 759 
 760   ExceptionMark em(THREAD);
 761   const ArchivedKlassSubGraphInfoRecord* record =
 762     resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
 763 
 764   if (HAS_PENDING_EXCEPTION) {
 765     CLEAR_PENDING_EXCEPTION;
 766     // None of the field values will be set if there was an exception when initializing the classes.
 767     // The Java code will not see any of the archived objects in the
 768     // subgraphs referenced from k in this case.
 769     return;
 770   }
 771 
 772   if (record != NULL) {
 773     init_archived_fields_for(k, record);
 774   }
 775 }
 776 
 777 const ArchivedKlassSubGraphInfoRecord*
 778 HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
 779   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
 780 
 781   if (!k->is_shared()) {
 782     return NULL;
 783   }
 784   unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
 785   const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
 786 
 787   // Initialize from archived data. Currently this is done only
 788   // during VM initialization time. No lock is needed.
 789   if (record != NULL) {
 790     if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
 791       if (log_is_enabled(Info, cds, heap)) {
 792         ResourceMark rm(THREAD);
 793         log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
 794                             k->external_name());
 795       }
 796       return NULL;
 797     }
 798 
 799     if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
 800       if (log_is_enabled(Info, cds, heap)) {
 801         ResourceMark rm(THREAD);
 802         log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
 803                             k->external_name());
 804       }
 805       return NULL;
 806     }
 807 
 808     resolve_or_init(k, do_init, CHECK_NULL);
 809 
 810     // Load/link/initialize the klasses of the objects in the subgraph.
 811     // NULL class loader is used.
 812     Array<Klass*>* klasses = record->subgraph_object_klasses();
 813     if (klasses != NULL) {
 814       for (int i = 0; i < klasses->length(); i++) {
 815         Klass* klass = klasses->at(i);
 816         if (!klass->is_shared()) {
 817           return NULL;
 818         }
 819         resolve_or_init(klass, do_init, CHECK_NULL);
 820       }
 821     }
 822   }
 823 
 824   return record;
 825 }
 826 
 827 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
 828   if (!do_init) {
 829     if (k->class_loader_data() == NULL) {
 830       Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
 831       assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
 832     }
 833   } else {
 834     assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
 835     if (k->is_instance_klass()) {
 836       InstanceKlass* ik = InstanceKlass::cast(k);
 837       ik->initialize(CHECK);
 838     } else if (k->is_objArray_klass()) {
 839       ObjArrayKlass* oak = ObjArrayKlass::cast(k);
 840       oak->initialize(CHECK);
 841     }
 842   }
 843 }
 844 
 845 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
 846   verify_the_heap(k, "before");
 847 
 848   // Load the subgraph entry fields from the record and store them back to
 849   // the corresponding fields within the mirror.
 850   oop m = k->java_mirror();
 851   Array<int>* entry_field_records = record->entry_field_records();
 852   if (entry_field_records != NULL) {
 853     int efr_len = entry_field_records->length();
 854     assert(efr_len % 2 == 0, "sanity");
 855     for (int i = 0; i < efr_len; i += 2) {
 856       int field_offset = entry_field_records->at(i);
 857       int root_index = entry_field_records->at(i+1);
 858       oop v = get_root(root_index, /*clear=*/true);
 859       m->obj_field_put(field_offset, v);
 860       log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
 861     }
 862 
 863     // Done. Java code can see the archived sub-graphs referenced from k's
 864     // mirror after this point.
 865     if (log_is_enabled(Info, cds, heap)) {
 866       ResourceMark rm;
 867       log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s",
 868                           k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "");
 869     }
 870   }
 871 
 872   verify_the_heap(k, "after ");
 873 }
 874 
 875 void HeapShared::clear_archived_roots_of(Klass* k) {
 876   unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
 877   const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
 878   if (record != NULL) {
 879     Array<int>* entry_field_records = record->entry_field_records();
 880     if (entry_field_records != NULL) {
 881       int efr_len = entry_field_records->length();
 882       assert(efr_len % 2 == 0, "sanity");
 883       for (int i = 0; i < efr_len; i += 2) {
 884         int root_index = entry_field_records->at(i+1);
 885         clear_root(root_index);
 886       }
 887     }
 888   }
 889 }
 890 
 891 class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
 892   int _level;
 893   bool _is_closed_archive;
 894   bool _record_klasses_only;
 895   KlassSubGraphInfo* _subgraph_info;
 896   oop _orig_referencing_obj;
 897   oop _archived_referencing_obj;
 898  public:
 899   WalkOopAndArchiveClosure(int level,
 900                            bool is_closed_archive,
 901                            bool record_klasses_only,
 902                            KlassSubGraphInfo* subgraph_info,
 903                            oop orig, oop archived) :
 904     _level(level), _is_closed_archive(is_closed_archive),
 905     _record_klasses_only(record_klasses_only),
 906     _subgraph_info(subgraph_info),
 907     _orig_referencing_obj(orig), _archived_referencing_obj(archived) {}
 908   void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
 909   void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
 910 
 911  protected:
 912   template <class T> void do_oop_work(T *p) {
 913     oop obj = RawAccess<>::oop_load(p);
 914     if (!CompressedOops::is_null(obj)) {
 915       assert(!HeapShared::is_archived_object_during_dumptime(obj),
 916              "original objects must not point to archived objects");
 917 
 918       size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
 919       T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);
 920 
 921       if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
 922         ResourceMark rm;
 923         log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level,
 924                              _orig_referencing_obj->klass()->external_name(), field_delta,
 925                              p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
 926         LogTarget(Trace, cds, heap) log;
 927         LogStream out(log);
 928         obj->print_on(&out);
 929       }
 930 
 931       oop archived = HeapShared::archive_reachable_objects_from(
 932           _level + 1, _subgraph_info, obj, _is_closed_archive);
 933       assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
 934       assert(HeapShared::is_archived_object_during_dumptime(archived), "must be");
 935 
 936       if (!_record_klasses_only) {
 937         // Update the reference in the archived copy of the referencing object.
 938         log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
 939                              _level, p2i(new_p), p2i(obj), p2i(archived));
 940         RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
 941       }
 942     }
 943   }
 944 };
 945 
 946 void HeapShared::check_closed_region_object(InstanceKlass* k) {
 947   // Check fields in the object
 948   for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
 949     if (!fs.access_flags().is_static()) {
 950       BasicType ft = fs.field_descriptor().field_type();
 951       if (!fs.access_flags().is_final() && is_reference_type(ft)) {
 952         ResourceMark rm;
 953         log_warning(cds, heap)(
 954           "Please check reference field in %s instance in closed archive heap region: %s %s",
 955           k->external_name(), (fs.name())->as_C_string(),
 956           (fs.signature())->as_C_string());
 957       }
 958     }
 959   }
 960 }
 961 
 962 void HeapShared::check_module_oop(oop orig_module_obj) {
 963   assert(DumpSharedSpaces, "must be");
 964   assert(java_lang_Module::is_instance(orig_module_obj), "must be");
 965   ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj);
 966   if (orig_module_ent == NULL) {
 967     // These special Module objects are created in Java code. They are not
 968     // defined via Modules::define_module(), so they don't have a ModuleEntry:
 969     //     java.lang.Module::ALL_UNNAMED_MODULE
 970     //     java.lang.Module::EVERYONE_MODULE
 971     //     jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule
 972     assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed");
 973     log_info(cds, heap)("Module oop with No ModuleEntry* @[" PTR_FORMAT "]", p2i(orig_module_obj));
 974   } else {
 975     ClassLoaderData* loader_data = orig_module_ent->loader_data();
 976     assert(loader_data->is_builtin_class_loader_data(), "must be");
 977   }
 978 }
 979 
 980 
 981 // (1) If orig_obj has not been archived yet, archive it.
 982 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
 983 //     trace all objects that are reachable from it, and make sure these objects are archived.
 984 // (3) Record the klasses of orig_obj and of all reachable objects.
 985 oop HeapShared::archive_reachable_objects_from(int level,
 986                                                KlassSubGraphInfo* subgraph_info,
 987                                                oop orig_obj,
 988                                                bool is_closed_archive) {
 989   assert(orig_obj != NULL, "must be");
 990   assert(!is_archived_object_during_dumptime(orig_obj), "sanity");
 991 
 992   if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
 993     // This object has injected fields that cannot be supported easily, so we disallow them for now.
 994     // If you get an error here, you probably made a change in the JDK library that has added
 995     // these objects that are referenced (directly or indirectly) by static fields.
 996     ResourceMark rm;
 997     log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
 998     vm_direct_exit(1);
 999   }
1000 
1001   // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
1002   // them as Klass::_archived_mirror because they need to be specially restored at run time.
1003   //
1004   // If you get an error here, you probably made a change in the JDK library that has added a Class
1005   // object that is referenced (directly or indirectly) by static fields.
1006   if (java_lang_Class::is_instance(orig_obj)) {
1007     log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
1008     vm_direct_exit(1);
1009   }
1010 
1011   oop archived_obj = find_archived_heap_object(orig_obj);
1012   if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
1013     // To save time, don't walk strings that are already archived. They just contain
1014     // pointers to a type array, whose klass doesn't need to be recorded.
1015     return archived_obj;
1016   }
1017 
1018   if (has_been_seen_during_subgraph_recording(orig_obj)) {
1019     // orig_obj has already been archived and traced. Nothing more to do.
1020     return archived_obj;
1021   } else {
1022     set_has_been_seen_during_subgraph_recording(orig_obj);
1023   }
1024 
1025   bool record_klasses_only = (archived_obj != NULL);
1026   if (archived_obj == NULL) {
1027     ++_num_new_archived_objs;
1028     archived_obj = archive_object(orig_obj);
1029     if (archived_obj == NULL) {
1030       // Skip archiving the sub-graph referenced from the current entry field.
1031       ResourceMark rm;
1032       log_error(cds, heap)(
1033         "Cannot archive the sub-graph referenced from %s object ("
1034         PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
1035         orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1036       if (level == 1) {
1037         // Don't archive a subgraph root that's too big. For archived static fields, that's OK
1038         // as the Java code will take care of initializing this field dynamically.
1039         return NULL;
1040       } else {
1041         // We don't know how to handle an object that has been archived, but some of its reachable
1042         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1043         // we have a real use case.
1044         vm_direct_exit(1);
1045       }
1046     }
1047 
1048     if (java_lang_Module::is_instance(orig_obj)) {
1049       check_module_oop(orig_obj);
1050       java_lang_Module::set_module_entry(archived_obj, NULL);
1051       java_lang_Module::set_loader(archived_obj, NULL);
1052     } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
1053       // class_data will be restored explicitly at run time.
1054       guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
1055                 orig_obj == SystemDictionary::java_system_loader() ||
1056                 java_lang_ClassLoader::loader_data(orig_obj) == NULL, "must be");
1057       java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
1058     }
1059   }
1060 
1061   assert(archived_obj != NULL, "must be");
1062   Klass *orig_k = orig_obj->klass();
1063   subgraph_info->add_subgraph_object_klass(orig_k);
1064 
1065   WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
1066                                   subgraph_info, orig_obj, archived_obj);
1067   orig_obj->oop_iterate(&walker);
1068   if (is_closed_archive && orig_k->is_instance_klass()) {
1069     check_closed_region_object(InstanceKlass::cast(orig_k));
1070   }
1071   return archived_obj;
1072 }
1073 
1074 //
1075 // Start from the given static field in a java mirror and archive the
1076 // complete sub-graph of java heap objects that are reached directly
1077 // or indirectly from the starting object by following references.
1078 // Sub-graph archiving restrictions (current):
1079 //
1080 // - All classes of objects in the archived sub-graph (including the
1081 //   entry class) must be boot class only.
1082 // - No java.lang.Class instance (java mirror) can be included inside
1083 //   an archived sub-graph. Mirror can only be the sub-graph entry object.
1084 //
1085 // The Java heap object sub-graph archiving process (see
1086 // WalkOopAndArchiveClosure):
1087 //
1088 // 1) Java object sub-graph archiving starts from a given static field
1089 // within a Class instance (java mirror). If the static field is a
1090 // reference field and points to a non-null java object, proceed to
1091 // the next step.
1092 //
1093 // 2) Archives the referenced java object. If an archived copy of the
1094 // current object already exists, updates the pointer in the archived
1095 // copy of the referencing object to point to the current archived object.
1096 // Otherwise, proceed to the next step.
1097 //
1098 // 3) Follows all references within the current java object and recursively
1099 // archive the sub-graph of objects starting from each reference.
1100 //
1101 // 4) Updates the pointer in the archived copy of referencing object to
1102 // point to the current archived object.
1103 //
1104 // 5) The Klass of the current java object is added to the list of Klasses
1105 // for loading and initializing before any object in the archived graph can
1106 // be accessed at runtime.
1107 //
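     // A concrete (simplified) example, using java/lang/Integer$IntegerCache::archivedCache
     // from closed_archive_subgraph_entry_fields above:
     //
     //   IntegerCache mirror --(archivedCache)--> Integer[] --(elements)--> Integer objects
     //
     // archive_reachable_objects_from_static_field() copies the Integer[] and every Integer
     // it references into the archive region, records [Ljava.lang.Integer; and
     // java.lang.Integer in the subgraph's klass list, and records the static field offset
     // together with the root index of the archived array. At run time,
     // initialize_from_archived_subgraph() initializes those klasses and stores the archived
     // array back into the mirror's archivedCache field.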
1108 void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
1109                                                              const char* klass_name,
1110                                                              int field_offset,
1111                                                              const char* field_name,
1112                                                              bool is_closed_archive) {
1113   assert(DumpSharedSpaces, "dump time only");
1114   assert(k->is_shared_boot_class(), "must be boot class");
1115 
1116   oop m = k->java_mirror();
1117 
1118   KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
1119   oop f = m->obj_field(field_offset);
1120 
1121   log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));
1122 
1123   if (!CompressedOops::is_null(f)) {
1124     if (log_is_enabled(Trace, cds, heap)) {
1125       LogTarget(Trace, cds, heap) log;
1126       LogStream out(log);
1127       f->print_on(&out);
1128     }
1129 
1130     oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);
1131 
1132     if (af == NULL) {
1133       log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
1134                            klass_name, field_name);
1135     } else {
1136       // Note: the field value is not preserved in the archived mirror.
1137       // Record the field as a new subGraph entry point. The recorded
1138       // information is restored from the archive at runtime.
1139       subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
1140       log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
1141     }
1142   } else {
1143     // The field contains null; we still need to record the entry point,
1144     // so it can be restored at runtime.
1145     subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
1146   }
1147 }
1148 
1149 #ifndef PRODUCT
1150 class VerifySharedOopClosure: public BasicOopIterateClosure {
1151  private:
1152   bool _is_archived;
1153 
1154  public:
1155   VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}
1156 
1157   void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
1158   void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }
1159 
1160  protected:
1161   template <class T> void do_oop_work(T *p) {
1162     oop obj = RawAccess<>::oop_load(p);
1163     if (!CompressedOops::is_null(obj)) {
1164       HeapShared::verify_reachable_objects_from(obj, _is_archived);
1165     }
1166   }
1167 };
1168 
1169 void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
1170   assert(DumpSharedSpaces, "dump time only");
1171   assert(k->is_shared_boot_class(), "must be boot class");
1172 
1173   oop m = k->java_mirror();
1174   oop f = m->obj_field(field_offset);
1175   if (!CompressedOops::is_null(f)) {
1176     verify_subgraph_from(f);
1177   }
1178 }
1179 
1180 void HeapShared::verify_subgraph_from(oop orig_obj) {
1181   oop archived_obj = find_archived_heap_object(orig_obj);
1182   if (archived_obj == NULL) {
1183     // It's OK for the root of a subgraph to be not archived. See comments in
1184     // archive_reachable_objects_from().
1185     return;
1186   }
1187 
1188   // Verify that all objects reachable from orig_obj are archived.
1189   init_seen_objects_table();
1190   verify_reachable_objects_from(orig_obj, false);
1191   delete_seen_objects_table();
1192 
1193   // Note: we could also verify that all objects reachable from the archived
1194   // copy of orig_obj can only point to archived objects, with:
1195   //      init_seen_objects_table();
1196   //      verify_reachable_objects_from(archived_obj, true);
1197   //      delete_seen_objects_table();
1198   // but that's already done in G1HeapVerifier::verify_archive_regions so we
1199   // won't do it here.
1200 }
1201 
1202 void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
1203   _num_total_verifications ++;
1204   if (!has_been_seen_during_subgraph_recording(obj)) {
1205     set_has_been_seen_during_subgraph_recording(obj);
1206 
1207     if (is_archived) {
1208       assert(is_archived_object_during_dumptime(obj), "must be");
1209       assert(find_archived_heap_object(obj) == NULL, "must be");
1210     } else {
1211       assert(!is_archived_object_during_dumptime(obj), "must be");
1212       assert(find_archived_heap_object(obj) != NULL, "must be");
1213     }
1214 
1215     VerifySharedOopClosure walker(is_archived);
1216     obj->oop_iterate(&walker);
1217   }
1218 }
1219 #endif
1220 
1221 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
1222 int HeapShared::_num_new_walked_objs;
1223 int HeapShared::_num_new_archived_objs;
1224 int HeapShared::_num_old_recorded_klasses;
1225 
1226 int HeapShared::_num_total_subgraph_recordings = 0;
1227 int HeapShared::_num_total_walked_objs = 0;
1228 int HeapShared::_num_total_archived_objs = 0;
1229 int HeapShared::_num_total_recorded_klasses = 0;
1230 int HeapShared::_num_total_verifications = 0;
1231 
1232 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
1233   return _seen_objects_table->get(obj) != NULL;
1234 }
1235 
1236 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
1237   assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
1238   _seen_objects_table->put(obj, true);
1239   ++ _num_new_walked_objs;
1240 }
1241 
1242 void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
1243   log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
1244   init_subgraph_info(k, is_full_module_graph);
1245   init_seen_objects_table();
1246   _num_new_walked_objs = 0;
1247   _num_new_archived_objs = 0;
1248   _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
1249 }
1250 
1251 void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
1252   int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
1253     _num_old_recorded_klasses;
1254   log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
1255                       "walked %d objs, archived %d new objs, recorded %d classes",
1256                       class_name, _num_new_walked_objs, _num_new_archived_objs,
1257                       num_new_recorded_klasses);
1258 
1259   delete_seen_objects_table();
1260 
1261   _num_total_subgraph_recordings ++;
1262   _num_total_walked_objs      += _num_new_walked_objs;
1263   _num_total_archived_objs    += _num_new_archived_objs;
1264   _num_total_recorded_klasses +=  num_new_recorded_klasses;
1265 }
1266 
1267 class ArchivableStaticFieldFinder: public FieldClosure {
1268   InstanceKlass* _ik;
1269   Symbol* _field_name;
1270   bool _found;
1271   int _offset;
1272 public:
1273   ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
1274     _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}
1275 
1276   virtual void do_field(fieldDescriptor* fd) {
1277     if (fd->name() == _field_name) {
1278       assert(!_found, "fields cannot be overloaded");
1279       assert(is_reference_type(fd->field_type()), "can archive only fields that are references");
1280       _found = true;
1281       _offset = fd->offset();
1282     }
1283   }
1284   bool found()     { return _found;  }
1285   int offset()     { return _offset; }
1286 };
1287 
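     // Resolve each {klass_name, field_name} pair in fields[] to an InstanceKlass* and a
     // field offset, for later use by archive_object_subgraphs(). The entry tables themselves
     // are defined elsewhere in this file; an entry has roughly this shape (illustrative
     // example only, not an actual table):
     //
     //   static ArchivableStaticFieldInfo example_fields[] = {
     //     {"java/lang/Integer$IntegerCache", "archivedCache"},
     //   };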
1288 void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
1289                                             int num, TRAPS) {
1290   for (int i = 0; i < num; i++) {
1291     ArchivableStaticFieldInfo* info = &fields[i];
1292     TempNewSymbol klass_name =  SymbolTable::new_symbol(info->klass_name);
1293     TempNewSymbol field_name =  SymbolTable::new_symbol(info->field_name);
1294 
1295     Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, CHECK);
1296     InstanceKlass* ik = InstanceKlass::cast(k);
1297     assert(ik->is_shared_boot_class(),
1298            "Only support boot classes");
1299     ik->initialize(CHECK);
1300 
1301     ArchivableStaticFieldFinder finder(ik, field_name);
1302     ik->do_local_static_fields(&finder);
1303     assert(finder.found(), "field must exist");
1304 
1305     info->klass = ik;
1306     info->offset = finder.offset();
1307   }
1308 }
1309 
1310 void HeapShared::init_subgraph_entry_fields(TRAPS) {
1311   assert(HeapShared::can_write(), "must be");
1312   _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();
1313   init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
1314                              num_closed_archive_subgraph_entry_fields,
1315                              CHECK);
1316   init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
1317                              num_open_archive_subgraph_entry_fields,
1318                              CHECK);
1319   if (MetaspaceShared::use_full_module_graph()) {
1320     init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields,
1321                                num_fmg_open_archive_subgraph_entry_fields,
1322                                CHECK);
1323   }
1324 }
1325 
1326 void HeapShared::init_for_dumping(TRAPS) {
1327   if (HeapShared::can_write()) {
1328     _dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
1329     init_subgraph_entry_fields(CHECK);
1330   }
1331 }
1332 
1333 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
1334                                           int num, bool is_closed_archive,
1335                                           bool is_full_module_graph) {
1336   _num_total_subgraph_recordings = 0;
1337   _num_total_walked_objs = 0;
1338   _num_total_archived_objs = 0;
1339   _num_total_recorded_klasses = 0;
1340   _num_total_verifications = 0;
1341 
1342   // For each class X that has one or more archived fields:
1343   // [1] Dump the subgraph of each archived field
1344   // [2] Create a list of all the classes of the objects that can be reached
1345   //     by any of these static fields.
1346   //     At runtime, these classes are initialized before X's archived fields
1347   //     are restored by HeapShared::initialize_from_archived_subgraph().
1348   int i;
1349   for (i = 0; i < num; ) {
1350     ArchivableStaticFieldInfo* info = &fields[i];
1351     const char* klass_name = info->klass_name;
1352     start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
1353 
1354     // Consecutive entries in fields[] that belong to the same klass are
1355     // archived in a single
1356     // {start_recording_subgraph ... done_recording_subgraph} pass, to
1357     // save time.
1358     for (; i < num; i++) {
1359       ArchivableStaticFieldInfo* f = &fields[i];
1360       if (f->klass_name != klass_name) {
1361         break;
1362       }
1363 
1364       archive_reachable_objects_from_static_field(f->klass, f->klass_name,
1365                                                   f->offset, f->field_name,
1366                                                   is_closed_archive);
1367     }
1368     done_recording_subgraph(info->klass, klass_name);
1369   }
1370 
1371   log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
1372                       is_closed_archive ? "closed" : "open",
1373                       _num_total_subgraph_recordings);
1374   log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
1375   log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
1376   log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);
1377 
1378 #ifndef PRODUCT
1379   for (int i = 0; i < num; i++) {
1380     ArchivableStaticFieldInfo* f = &fields[i];
1381     verify_subgraph_from_static_field(f->klass, f->offset);
1382   }
1383   log_info(cds, heap)("  Verified %d references", _num_total_verifications);
1384 #endif
1385 }
1386 
1387 // Not all the strings in the global StringTable are dumped into the archive, because
1388 // some of those strings may be referenced only by classes that are excluded from
1389 // the archive. We need to explicitly mark the strings that are:
1390 //   [1] used by classes that WILL be archived;
1391 //   [2] included in the SharedArchiveConfigFile.
1392 void HeapShared::add_to_dumped_interned_strings(oop string) {
1393   assert_at_safepoint(); // DumpedInternedStrings uses raw oops
1394   bool created;
1395   _dumped_interned_strings->put_if_absent(string, true, &created);
1396 }
1397 
1398 // At dump-time, find the location of all the non-null oop pointers in an archived heap
1399 // region. This way we can quickly relocate all the pointers without using
1400 // BasicOopIterateClosure at runtime.
1401 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
1402   narrowOop* _start;
1403   BitMap *_oopmap;
1404   int _num_total_oops;
1405   int _num_null_oops;
1406  public:
1407   FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
1408     : _start(start), _oopmap(oopmap), _num_total_oops(0),  _num_null_oops(0) {}
1409 
1410   virtual void do_oop(narrowOop* p) {
1411     _num_total_oops ++;
1412     narrowOop v = *p;
1413     if (!CompressedOops::is_null(v)) {
1414       size_t idx = p - _start;
1415       _oopmap->set_bit(idx);
1416     } else {
1417       _num_null_oops ++;
1418     }
1419   }
1420   virtual void do_oop(oop *p) {
1421     ShouldNotReachHere();
1422   }
1423   int num_total_oops() const { return _num_total_oops; }
1424   int num_null_oops()  const { return _num_null_oops; }
1425 };
1426 
1427 ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
1428   assert(UseCompressedOops, "must be");
1429   size_t num_bits = region.byte_size() / sizeof(narrowOop);
1430   ResourceBitMap oopmap(num_bits);
1431 
1432   HeapWord* p   = region.start();
1433   HeapWord* end = region.end();
1434   FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);
1435   ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL;
1436 
1437   int num_objs = 0;
1438   while (p < end) {
1439     oop o = cast_to_oop(p);
1440     o->oop_iterate(&finder);
1441     p += o->size();
1442     if (DumpSharedSpaces) {
1443       builder->relocate_klass_ptr(o);
1444     }
1445     ++ num_objs;
1446   }
1447 
1448   log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
1449                       num_objs, finder.num_total_oops(), finder.num_null_oops());
1450   return oopmap;
1451 }
1452 
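     // In the bitmap returned by calculate_oopmap(), bit i corresponds to the narrowOop slot
     // at (narrowOop*)region.start() + i, i.e. i * sizeof(narrowOop) bytes from the start of
     // the region; a set bit means that slot holds a non-null compressed oop. For example,
     // with 4-byte narrowOops, a set bit at index 7 marks a non-null oop field 28 bytes into
     // the region. The closures below visit exactly these marked slots at runtime.
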
1453 // Patch all the embedded oop pointers inside an archived heap region,
1454 // to be consistent with the runtime oop encoding.
1455 class PatchEmbeddedPointers: public BitMapClosure {
1456   narrowOop* _start;
1457 
1458  public:
1459   PatchEmbeddedPointers(narrowOop* start) : _start(start) {}
1460 
1461   bool do_bit(size_t offset) {
1462     narrowOop* p = _start + offset;
1463     narrowOop v = *p;
1464     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
1465     oop o = HeapShared::decode_from_archive(v);
1466     RawAccess<IS_NOT_NULL>::oop_store(p, o);
1467     return true;
1468   }
1469 };
1470 
1471 // Patch all the non-null pointers that are embedded in the archived heap objects
1472 // in this region
1473 void HeapShared::patch_embedded_pointers(MemRegion region, address oopmap,
1474                                          size_t oopmap_size_in_bits) {
1475   BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
1476 
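       // In non-product builds, recompute the oopmap from the objects just mapped in and
       // verify that it matches the bitmap that was stored in the archive at dump time.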
1477 #ifndef PRODUCT
1478   ResourceMark rm;
1479   ResourceBitMap checkBm = calculate_oopmap(region);
1480   assert(bm.is_same(checkBm), "sanity");
1481 #endif
1482 
1483   PatchEmbeddedPointers patcher((narrowOop*)region.start());
1484   bm.iterate(&patcher);
1485 }
1486 
1487 // The CDS archive remembers each heap object by its address at dump time, but
1488 // the heap object may be loaded at a different address at run time. This structure is used
1489 // to translate the dump-time addresses for all objects in FileMapInfo::space_at(region_index)
1490 // to their runtime addresses.
1491 struct LoadedArchiveHeapRegion {
1492   int       _region_index;   // index for FileMapInfo::space_at(index)
1493   size_t    _region_size;    // number of bytes in this region
1494   uintptr_t _dumptime_base;  // The dump-time (decoded) address of the first object in this region
1495   intx      _runtime_offset; // If an object's dump-time address P is within this region, its
1496                              // runtime address is P + _runtime_offset
1497 
1498   static int comparator(const void* a, const void* b) {
1499     LoadedArchiveHeapRegion* reg_a = (LoadedArchiveHeapRegion*)a;
1500     LoadedArchiveHeapRegion* reg_b = (LoadedArchiveHeapRegion*)b;
1501     if (reg_a->_dumptime_base < reg_b->_dumptime_base) {
1502       return -1;
1503     } else if (reg_a->_dumptime_base == reg_b->_dumptime_base) {
1504       return 0;
1505     } else {
1506       return 1;
1507     }
1508   }
1509 
1510   uintptr_t top() {
1511     return _dumptime_base + _region_size;
1512   }
1513 };
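
     // Symbolic example of the translation: if a region's first object had dump-time
     // (decoded) address D and the region's bytes are loaded at runtime address R, then
     // _runtime_offset = R - D (computed in sort_loaded_regions() below), and an object
     // dumped at address P inside this region now lives at P + _runtime_offset.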
1514 
1515 void HeapShared::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_regions,
1516                                              int num_loaded_regions) {
1517   _dumptime_base_0 = loaded_regions[0]._dumptime_base;
1518   _dumptime_base_1 = loaded_regions[1]._dumptime_base;
1519   _dumptime_base_2 = loaded_regions[2]._dumptime_base;
1520   _dumptime_base_3 = loaded_regions[3]._dumptime_base;
1521   _dumptime_top = loaded_regions[num_loaded_regions-1].top();
1522 
1523   _runtime_offset_0 = loaded_regions[0]._runtime_offset;
1524   _runtime_offset_1 = loaded_regions[1]._runtime_offset;
1525   _runtime_offset_2 = loaded_regions[2]._runtime_offset;
1526   _runtime_offset_3 = loaded_regions[3]._runtime_offset;
1527 
1528   assert(2 <= num_loaded_regions && num_loaded_regions <= 4, "must be");
1529   if (num_loaded_regions < 4) {
1530     _dumptime_base_3 = UINTPTR_MAX;
1531   }
1532   if (num_loaded_regions < 3) {
1533     _dumptime_base_2 = UINTPTR_MAX;
1534   }
1535 }
1536 
1537 bool HeapShared::can_load() {
1538   return Universe::heap()->can_load_archived_objects();
1539 }
1540 
1541 template <int NUM_LOADED_REGIONS>
1542 class PatchLoadedRegionPointers: public BitMapClosure {
1543   narrowOop* _start;
1544   intx _offset_0;
1545   intx _offset_1;
1546   intx _offset_2;
1547   intx _offset_3;
1548   uintptr_t _base_0;
1549   uintptr_t _base_1;
1550   uintptr_t _base_2;
1551   uintptr_t _base_3;
1552   uintptr_t _top;
1553 
1554   static_assert(MetaspaceShared::max_num_heap_regions == 4, "can't handle more than 4 regions");
1555   static_assert(NUM_LOADED_REGIONS >= 2, "we have at least 2 loaded regions");
1556   static_assert(NUM_LOADED_REGIONS <= 4, "we have at most 4 loaded regions");
1557 
1558  public:
1559   PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_regions)
1560     : _start(start),
1561       _offset_0(loaded_regions[0]._runtime_offset),
1562       _offset_1(loaded_regions[1]._runtime_offset),
1563       _offset_2(loaded_regions[2]._runtime_offset),
1564       _offset_3(loaded_regions[3]._runtime_offset),
1565       _base_0(loaded_regions[0]._dumptime_base),
1566       _base_1(loaded_regions[1]._dumptime_base),
1567       _base_2(loaded_regions[2]._dumptime_base),
1568       _base_3(loaded_regions[3]._dumptime_base) {
1569     _top = loaded_regions[NUM_LOADED_REGIONS-1].top();
1570   }
1571 
1572   bool do_bit(size_t offset) {
1573     narrowOop* p = _start + offset;
1574     narrowOop v = *p;
1575     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
1576     uintptr_t o = cast_from_oop<uintptr_t>(HeapShared::decode_from_archive(v));
1577     assert(_base_0 <= o && o < _top, "must be");
1578 
1580     // We usually have only 2 regions for the default archive. Use template to avoid unnecessary comparisons.
1581     if (NUM_LOADED_REGIONS > 3 && o >= _base_3) {
1582       o += _offset_3;
1583     } else if (NUM_LOADED_REGIONS > 2 && o >= _base_2) {
1584       o += _offset_2;
1585     } else if (o >= _base_1) {
1586       o += _offset_1;
1587     } else {
1588       o += _offset_0;
1589     }
1590     HeapShared::assert_in_loaded_heap(o);
1591     RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
1592     return true;
1593   }
1594 };
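
     // load_regions() below picks the instantiation (2, 3 or 4) that matches the number of
     // heap regions actually present in the archive, so do_bit() performs only the region
     // comparisons that can possibly succeed.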
1595 
1596 int HeapShared::init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
1597                                     MemRegion& archive_space) {
1598   size_t total_bytes = 0;
1599   int num_loaded_regions = 0;
1600   for (int i = MetaspaceShared::first_archive_heap_region;
1601        i <= MetaspaceShared::last_archive_heap_region; i++) {
1602     FileMapRegion* r = mapinfo->space_at(i);
1603     r->assert_is_heap_region();
1604     if (r->used() > 0) {
1605       assert(is_aligned(r->used(), HeapWordSize), "must be");
1606       total_bytes += r->used();
1607       LoadedArchiveHeapRegion* ri = &loaded_regions[num_loaded_regions++];
1608       ri->_region_index = i;
1609       ri->_region_size = r->used();
1610       ri->_dumptime_base = (uintptr_t)mapinfo->start_address_as_decoded_from_archive(r);
1611     }
1612   }
1613 
1614   assert(is_aligned(total_bytes, HeapWordSize), "must be");
1615   size_t word_size = total_bytes / HeapWordSize;
1616   HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
1617   if (buffer == nullptr) {
1618     return 0;
1619   }
1620 
1621   archive_space = MemRegion(buffer, word_size);
1622   _loaded_heap_bottom = (uintptr_t)archive_space.start();
1623   _loaded_heap_top    = _loaded_heap_bottom + total_bytes;
1624 
1625   return num_loaded_regions;
1626 }
1627 
1628 void HeapShared::sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions,
1629                                      uintptr_t buffer) {
1630   // Find the relocation offset of the pointers in each region
1631   qsort(loaded_regions, num_loaded_regions, sizeof(LoadedArchiveHeapRegion),
1632         LoadedArchiveHeapRegion::comparator);
1633 
1634   uintptr_t p = buffer;
1635   for (int i = 0; i < num_loaded_regions; i++) {
1636     // This region will be loaded at p, so all objects inside this
1637     // region will be shifted by ri->_runtime_offset.
1638     LoadedArchiveHeapRegion* ri = &loaded_regions[i];
1639     ri->_runtime_offset = p - ri->_dumptime_base;
1640     p += ri->_region_size;
1641   }
1642   assert(p == _loaded_heap_top, "must be");
1643 }
1644 
1645 bool HeapShared::load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
1646                               int num_loaded_regions, uintptr_t buffer) {
1647   uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
1648   uintptr_t load_address = buffer;
1649   for (int i = 0; i < num_loaded_regions; i++) {
1650     LoadedArchiveHeapRegion* ri = &loaded_regions[i];
1651     FileMapRegion* r = mapinfo->space_at(ri->_region_index);
1652 
1653     if (!mapinfo->read_region(ri->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
1654       // There's no easy way to free the buffer, so we will fill it with dummy objects later
1655       // in fill_failed_loaded_region(), and it will eventually be GC'ed.
1656       log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", i);
1657       _loading_failed = true;
1658       return false;
1659     }
1660     log_info(cds)("Loaded heap    region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
1661                   " size " SIZE_FORMAT_W(6) " delta " INTX_FORMAT,
1662                   ri->_region_index, load_address, load_address + ri->_region_size,
1663                   ri->_region_size, ri->_runtime_offset);
1664 
1665     uintptr_t oopmap = bitmap_base + r->oopmap_offset();
1666     BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());
1667 
1668     if (num_loaded_regions == 4) {
1669       PatchLoadedRegionPointers<4> patcher((narrowOop*)load_address, loaded_regions);
1670       bm.iterate(&patcher);
1671     } else if (num_loaded_regions == 3) {
1672       PatchLoadedRegionPointers<3> patcher((narrowOop*)load_address, loaded_regions);
1673       bm.iterate(&patcher);
1674     } else {
1675       assert(num_loaded_regions == 2, "must be");
1676       PatchLoadedRegionPointers<2> patcher((narrowOop*)load_address, loaded_regions);
1677       bm.iterate(&patcher);
1678     }
1679 
1680     load_address += r->used();
1681   }
1682 
1683   return true;
1684 }
1685 
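     // Drives the "loading" path for the archived heap regions (as opposed to mapping them
     // directly):
     //   [1] init_loaded_regions()          - size the used regions and allocate a contiguous
     //                                        block for them inside the runtime heap;
     //   [2] sort_loaded_regions()          - order the regions by dump-time base address and
     //                                        compute each region's _runtime_offset;
     //   [3] load_regions()                 - read each region's bytes from the archive and
     //                                        patch its embedded narrowOops via the stored oopmap;
     //   [4] init_loaded_heap_relocation()  - record the region bases and offsets in the
     //                                        HeapShared statics.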
1686 bool HeapShared::load_heap_regions(FileMapInfo* mapinfo) {
1687   init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());
1688 
1689   LoadedArchiveHeapRegion loaded_regions[MetaspaceShared::max_num_heap_regions];
1690   memset(loaded_regions, 0, sizeof(loaded_regions));
1691 
1692   MemRegion archive_space;
1693   int num_loaded_regions = init_loaded_regions(mapinfo, loaded_regions, archive_space);
1694   if (num_loaded_regions <= 0) {
1695     return false;
1696   }
1697   sort_loaded_regions(loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start());
1698   if (!load_regions(mapinfo, loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start())) {
1699     assert(_loading_failed, "must be");
1700     return false;
1701   }
1702 
1703   init_loaded_heap_relocation(loaded_regions, num_loaded_regions);
1704   _is_loaded = true;
1705   set_roots(mapinfo->heap_obj_roots());
1706 
1707   return true;
1708 }
1709 
1710 class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
1711   ResourceHashtable<uintptr_t, bool>* _table;
1712 
1713  public:
1714   VerifyLoadedHeapEmbeddedPointers(ResourceHashtable<uintptr_t, bool>* table) : _table(table) {}
1715 
1716   virtual void do_oop(narrowOop* p) {
1717     // This is called before the loaded regions are modified, so every embedded pointer
1718     // must either be NULL or point to a valid object within the loaded regions.
1719     narrowOop v = *p;
1720     if (!CompressedOops::is_null(v)) {
1721       oop o = CompressedOops::decode_not_null(v);
1722       uintptr_t u = cast_from_oop<uintptr_t>(o);
1723       HeapShared::assert_in_loaded_heap(u);
1724       guarantee(_table->contains(u), "must point to beginning of object in loaded archived regions");
1725     }
1726   }
1727   virtual void do_oop(oop* p) {
1728     ShouldNotReachHere();
1729   }
1730 };
1731 
1732 void HeapShared::finish_initialization() {
1733   if (is_loaded()) {
1734     HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
1735     HeapWord* top    = (HeapWord*)_loaded_heap_top;
1736 
1737     MemRegion archive_space = MemRegion(bottom, top);
1738     Universe::heap()->complete_loaded_archive_space(archive_space);
1739   }
1740 
1741   if (VerifyArchivedFields <= 0 || !is_loaded()) {
1742     return;
1743   }
1744 
1745   log_info(cds, heap)("Verify all oops and pointers in loaded heap");
1746 
1747   ResourceMark rm;
1748   ResourceHashtable<uintptr_t, bool> table;
1749   VerifyLoadedHeapEmbeddedPointers verifier(&table);
1750   HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
1751   HeapWord* top    = (HeapWord*)_loaded_heap_top;
1752 
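       // Pass 1: remember the start address of every object in the loaded range.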
1753   for (HeapWord* p = bottom; p < top; ) {
1754     oop o = cast_to_oop(p);
1755     table.put(cast_from_oop<uintptr_t>(o), true);
1756     p += o->size();
1757   }
1758 
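       // Pass 2: check that every non-null embedded pointer refers to one of the object
       // starts recorded above.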
1759   for (HeapWord* p = bottom; p < top; ) {
1760     oop o = cast_to_oop(p);
1761     o->oop_iterate(&verifier);
1762     p += o->size();
1763   }
1764 }
1765 
1766 void HeapShared::fill_failed_loaded_region() {
1767   assert(_loading_failed, "must be");
1768   if (_loaded_heap_bottom != 0) {
1769     assert(_loaded_heap_top != 0, "must be");
1770     HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
1771     HeapWord* top = (HeapWord*)_loaded_heap_top;
1772     Universe::heap()->fill_with_objects(bottom, top - bottom);
1773   }
1774 }
1775 
1776 #endif // INCLUDE_CDS_JAVA_HEAP