1 /*
   2  * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "cds/archiveBuilder.hpp"
  27 #include "cds/archiveUtils.hpp"
  28 #include "cds/filemap.hpp"
  29 #include "cds/heapShared.inline.hpp"
  30 #include "cds/metaspaceShared.hpp"
  31 #include "classfile/classLoaderData.hpp"
  32 #include "classfile/classLoaderDataShared.hpp"
  33 #include "classfile/javaClasses.inline.hpp"
  34 #include "classfile/moduleEntry.hpp"
  35 #include "classfile/stringTable.hpp"
  36 #include "classfile/symbolTable.hpp"
  37 #include "classfile/systemDictionary.hpp"
  38 #include "classfile/systemDictionaryShared.hpp"
  39 #include "classfile/vmClasses.hpp"
  40 #include "classfile/vmSymbols.hpp"
  41 #include "gc/shared/collectedHeap.hpp"
  42 #include "gc/shared/gcLocker.hpp"
  43 #include "gc/shared/gcVMOperations.hpp"
  44 #include "logging/log.hpp"
  45 #include "logging/logMessage.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "memory/iterator.inline.hpp"
  48 #include "memory/metadataFactory.hpp"
  49 #include "memory/metaspaceClosure.hpp"
  50 #include "memory/resourceArea.hpp"
  51 #include "memory/universe.hpp"
  52 #include "oops/compressedOops.inline.hpp"
  53 #include "oops/fieldStreams.inline.hpp"
  54 #include "oops/objArrayOop.hpp"
  55 #include "oops/oop.inline.hpp"
  56 #include "prims/jvmtiExport.hpp"
  57 #include "runtime/fieldDescriptor.inline.hpp"
  58 #include "runtime/globals_extension.hpp"
  59 #include "runtime/init.hpp"
  60 #include "runtime/java.hpp"
  61 #include "runtime/javaCalls.hpp"
  62 #include "runtime/safepointVerifiers.hpp"
  63 #include "utilities/bitMap.inline.hpp"
  64 #include "utilities/copy.hpp"
  65 #if INCLUDE_G1GC
  66 #include "gc/g1/g1CollectedHeap.hpp"
  67 #endif
  68 
  69 #if INCLUDE_CDS_JAVA_HEAP
  70 
  71 bool HeapShared::_closed_regions_mapped = false;
  72 bool HeapShared::_open_regions_mapped = false;
  73 bool HeapShared::_is_loaded = false;
  74 address   HeapShared::_narrow_oop_base;
  75 int       HeapShared::_narrow_oop_shift;
  76 DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
  77 
  78 uintptr_t HeapShared::_loaded_heap_bottom = 0;
  79 uintptr_t HeapShared::_loaded_heap_top = 0;
  80 uintptr_t HeapShared::_dumptime_base_0 = UINTPTR_MAX;
  81 uintptr_t HeapShared::_dumptime_base_1 = UINTPTR_MAX;
  82 uintptr_t HeapShared::_dumptime_base_2 = UINTPTR_MAX;
  83 uintptr_t HeapShared::_dumptime_base_3 = UINTPTR_MAX;
  84 uintptr_t HeapShared::_dumptime_top    = 0;
  85 intx HeapShared::_runtime_offset_0 = 0;
  86 intx HeapShared::_runtime_offset_1 = 0;
  87 intx HeapShared::_runtime_offset_2 = 0;
  88 intx HeapShared::_runtime_offset_3 = 0;
  89 bool HeapShared::_loading_failed = false;
  90 //
  91 // If you add new entries to the following tables, you should know what you're doing!
  92 //
  93 
  94 // Entry fields for shareable subgraphs archived in the closed archive heap
  95 // region. Warning: Objects in the subgraphs should not have reference fields
  96 // assigned at runtime.
  97 static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  98   {"java/lang/Integer$IntegerCache",              "archivedCache"},
  99   {"java/lang/Long$LongCache",                    "archivedCache"},
 100   {"java/lang/Byte$ByteCache",                    "archivedCache"},
 101   {"java/lang/Short$ShortCache",                  "archivedCache"},
 102   {"java/lang/Character$CharacterCache",          "archivedCache"},
 103   {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
 104   {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
 105 };
 106 // Entry fields for subgraphs archived in the open archive heap region.
 107 static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
 108   {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
 109   {"java/util/ImmutableCollections",              "archivedObjects"},
 110   {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
 111   {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
 112   {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
 113 };
 114 
 115 // Entry fields for subgraphs archived in the open archive heap region (full module graph).
 116 static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
 117   {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
 118   {"jdk/internal/module/ArchivedBootLayer",       "archivedBootLayer"},
 119   {"java/lang/Module$ArchivedData",               "archivedData"},
 120 };
 121 
 122 const static int num_closed_archive_subgraph_entry_fields =
 123   sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
 124 const static int num_open_archive_subgraph_entry_fields =
 125   sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
 126 const static int num_fmg_open_archive_subgraph_entry_fields =
 127   sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
 128 
 129 GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL;
 130 narrowOop HeapShared::_roots_narrow;
 131 OopHandle HeapShared::_roots;
 132 
 133 #ifdef ASSERT
 134 bool HeapShared::is_archived_object_during_dumptime(oop p) {
 135   assert(HeapShared::can_write(), "must be");
 136   assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
 137   return Universe::heap()->is_archived_object(p);
 138 }
 139 #endif
 140 
 141 ////////////////////////////////////////////////////////////////
 142 //
 143 // Java heap object archiving support
 144 //
 145 ////////////////////////////////////////////////////////////////
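     // Called at run time after the archived heap regions have been mapped (or loaded).
     // Fixes up the mapped regions, resolves the archived roots array, and drops the
     // archived java.lang.Module oops if the full module graph cannot be used.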
 146 void HeapShared::fixup_regions() {
 147   FileMapInfo* mapinfo = FileMapInfo::current_info();
 148   if (is_mapped()) {
 149     mapinfo->fixup_mapped_heap_regions();
 150   } else if (_loading_failed) {
 151     fill_failed_loaded_region();
 152   }
 153   if (is_fully_available()) {
 154     _roots = OopHandle(Universe::vm_global(), decode_from_archive(_roots_narrow));
 155     if (!MetaspaceShared::use_full_module_graph()) {
 156       // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
 157       ClassLoaderDataShared::clear_archived_oops();
 158     }
 159   }
 160   SystemDictionaryShared::update_archived_mirror_native_pointers();
 161 }
 162 
 163 unsigned HeapShared::oop_hash(oop const& p) {
 164   unsigned hash = (unsigned)p->identity_hash();
 165   return hash;
 166 }
 167 
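     // Invoke the private no-arg method "resetArchivedStates", if present, on obj and
     // on each of its superclasses. Called at dump time for the platform and system
     // class loaders (see reset_archived_object_states() below).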
 168 static void reset_states(oop obj, TRAPS) {
 169   Handle h_obj(THREAD, obj);
 170   InstanceKlass* klass = InstanceKlass::cast(obj->klass());
 171   TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
 172   Symbol* method_sig = vmSymbols::void_method_signature();
 173 
 174   while (klass != NULL) {
 175     Method* method = klass->find_method(method_name, method_sig);
 176     if (method != NULL) {
 177       assert(method->is_private(), "must be");
 178       if (log_is_enabled(Debug, cds)) {
 179         ResourceMark rm(THREAD);
 180         log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
 181       }
 182       JavaValue result(T_VOID);
 183       JavaCalls::call_special(&result, h_obj, klass,
 184                               method_name, method_sig, CHECK);
 185     }
 186     klass = klass->java_super();
 187   }
 188 }
 189 
 190 void HeapShared::reset_archived_object_states(TRAPS) {
 191   assert(DumpSharedSpaces, "dump-time only");
 192   log_debug(cds)("Resetting platform loader");
 193   reset_states(SystemDictionary::java_platform_loader(), CHECK);
 194   log_debug(cds)("Resetting system loader");
 195   reset_states(SystemDictionary::java_system_loader(), CHECK);
 196 }
 197 
 198 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
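     // Returns the archived copy of obj, or NULL if obj has not been archived yet.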
 199 oop HeapShared::find_archived_heap_object(oop obj) {
 200   assert(DumpSharedSpaces, "dump-time only");
 201   ArchivedObjectCache* cache = archived_object_cache();
 202   oop* p = cache->get(obj);
 203   if (p != NULL) {
 204     return *p;
 205   } else {
 206     return NULL;
 207   }
 208 }
 209 
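     // Record obj as a root of the archived heap and return its index in the roots
     // array. The index is stored in the subgraph entry field records and resolved
     // again via get_root() at run time.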
 210 int HeapShared::append_root(oop obj) {
 211   assert(DumpSharedSpaces, "dump-time only");
 212 
 213   // No GC should happen since we aren't scanning _pending_roots.
 214   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 215 
 216   if (_pending_roots == NULL) {
 217     _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
 218   }
 219 
 220   return _pending_roots->append(obj);
 221 }
 222 
 223 objArrayOop HeapShared::roots() {
 224   if (DumpSharedSpaces) {
 225     assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 226     if (!HeapShared::can_write()) {
 227       return NULL;
 228     }
 229   } else {
 230     assert(UseSharedSpaces, "must be");
 231   }
 232 
 233   objArrayOop roots = (objArrayOop)_roots.resolve();
 234   assert(roots != NULL, "should have been initialized");
 235   return roots;
 236 }
 237 
 238 void HeapShared::set_roots(narrowOop roots) {
 239   assert(UseSharedSpaces, "runtime only");
 240   assert(is_fully_available(), "must be");
 241   _roots_narrow = roots;
 242 }
 243 
 244 // Returns the root object at the given index; at run time the slot can optionally be cleared afterwards.
 245 oop HeapShared::get_root(int index, bool clear) {
 246   assert(index >= 0, "sanity");
 247   if (DumpSharedSpaces) {
 248     assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 249     assert(_pending_roots != NULL, "sanity");
 250     return _pending_roots->at(index);
 251   } else {
 252     assert(UseSharedSpaces, "must be");
 253     assert(!_roots.is_empty(), "must have loaded shared heap");
 254     oop result = roots()->obj_at(index);
 255     if (clear) {
 256       clear_root(index);
 257     }
 258     return result;
 259   }
 260 }
 261 
 262 void HeapShared::clear_root(int index) {
 263   assert(index >= 0, "sanity");
 264   assert(UseSharedSpaces, "must be");
 265   if (is_fully_available()) {
 266     if (log_is_enabled(Debug, cds, heap)) {
 267       oop old = roots()->obj_at(index);
 268       log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
 269     }
 270     roots()->obj_at_put(index, NULL);
 271   }
 272 }
 273 
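     // Copy obj into the G1 archive space and return the archived copy. Returns the
     // existing copy if obj has already been archived, or NULL if obj is too large to
     // be allocated in an archive region. Exits the VM if the allocation itself fails.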
 274 oop HeapShared::archive_object(oop obj) {
 275   assert(DumpSharedSpaces, "dump-time only");
 276 
 277   oop ao = find_archived_heap_object(obj);
 278   if (ao != NULL) {
 279     // already archived
 280     return ao;
 281   }
 282 
 283   int len = obj->size();
 284   if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
 285     log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
 286                          p2i(obj), (size_t)obj->size());
 287     return NULL;
 288   }
 289 
 290   oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
 291   if (archived_oop != NULL) {
 292     Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
 293     // Reinitialize markword to remove age/marking/locking/etc.
 294     //
 295     // We need to retain the identity_hash, because it may have been used by some hashtables
 296     // in the shared heap. This also has the side effect of pre-initializing the
 297     // identity_hash for all shared objects, so they are less likely to be written
 298     // into during run time, increasing the potential of memory sharing.
 299     int hash_original = obj->identity_hash();
 300     archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
 301     assert(archived_oop->mark().is_unlocked(), "sanity");
 302 
 303     DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
 304     assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);
 305 
 306     ArchivedObjectCache* cache = archived_object_cache();
 307     cache->put(obj, archived_oop);
 308     if (log_is_enabled(Debug, cds, heap)) {
 309       ResourceMark rm;
 310       log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
 311                            p2i(obj), p2i(archived_oop), obj->klass()->external_name());
 312     }
 313   } else {
 314     log_error(cds, heap)(
 315       "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
 316       p2i(obj));
 317     vm_direct_exit(-1,
 318       err_msg("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
 319               SIZE_FORMAT "M", MaxHeapSize/M));
 320   }
 321   return archived_oop;
 322 }
 323 
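     // For every class that is written into the archive, archive its java mirror and
     // (for instance classes) its resolved_references array.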
 324 void HeapShared::archive_klass_objects() {
 325   GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
 326   assert(klasses != NULL, "sanity");
 327   for (int i = 0; i < klasses->length(); i++) {
 328     Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));
 329 
 330     // archive mirror object
 331     java_lang_Class::archive_mirror(k);
 332 
 333     // archive the resolved_references array
 334     if (k->is_instance_klass()) {
 335       InstanceKlass* ik = InstanceKlass::cast(k);
 336       ik->constants()->archive_resolved_references();
 337     }
 338   }
 339 }
 340 
 341 void HeapShared::run_full_gc_in_vm_thread() {
 342   if (HeapShared::can_write()) {
 343     // Avoid fragmentation while archiving heap objects.
 344     // We do this inside a safepoint, so that no further allocation can happen after GC
 345     // has finished.
 346     if (GCLocker::is_active()) {
 347       // Just checking for safety ...
 348       // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
 349       // has been modified such that JNI code is executed in some cleanup threads after
 350       // we have finished class loading.
 351       log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
 352     } else {
 353       log_info(cds)("Run GC ...");
 354       Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
 355       log_info(cds)("Run GC done");
 356     }
 357   }
 358 }
 359 
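     // Copies all archivable objects into the closed and open G1 archive heap regions
     // and verifies the resulting regions. No safepoint may occur while the objects
     // are being copied (enforced by the NoSafepointVerifier below).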
 360 void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
 361                                  GrowableArray<MemRegion>* open_regions) {
 362 
 363   G1HeapVerifier::verify_ready_for_archiving();
 364 
 365   {
 366     NoSafepointVerifier nsv;
 367 
 368     // Cache for recording where the archived objects are copied to
 369     create_archived_object_cache();
 370 
 371     log_info(cds)("Heap range = [" PTR_FORMAT " - "  PTR_FORMAT "]",
 372                   p2i(CompressedOops::begin()), p2i(CompressedOops::end()));
 373     log_info(cds)("Dumping objects to closed archive heap region ...");
 374     copy_closed_objects(closed_regions);
 375 
 376     log_info(cds)("Dumping objects to open archive heap region ...");
 377     copy_open_objects(open_regions);
 378 
 379     destroy_archived_object_cache();
 380   }
 381 
 382   G1HeapVerifier::verify_archive_regions();
 383 }
 384 
 385 void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) {
 386   assert(HeapShared::can_write(), "must be");
 387 
 388   G1CollectedHeap::heap()->begin_archive_alloc_range();
 389 
 390   // Archive interned string objects
 391   StringTable::write_to_archive(_dumped_interned_strings);
 392 
 393   archive_object_subgraphs(closed_archive_subgraph_entry_fields,
 394                            num_closed_archive_subgraph_entry_fields,
 395                            true /* is_closed_archive */,
 396                            false /* is_full_module_graph */);
 397 
 398   G1CollectedHeap::heap()->end_archive_alloc_range(closed_regions,
 399                                                    os::vm_allocation_granularity());
 400 }
 401 
 402 void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) {
 403   assert(HeapShared::can_write(), "must be");
 404 
 405   G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);
 406 
 407   java_lang_Class::archive_basic_type_mirrors();
 408 
 409   archive_klass_objects();
 410 
 411   archive_object_subgraphs(open_archive_subgraph_entry_fields,
 412                            num_open_archive_subgraph_entry_fields,
 413                            false /* is_closed_archive */,
 414                            false /* is_full_module_graph */);
 415   if (MetaspaceShared::use_full_module_graph()) {
 416     archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
 417                              num_fmg_open_archive_subgraph_entry_fields,
 418                              false /* is_closed_archive */,
 419                              true /* is_full_module_graph */);
 420     ClassLoaderDataShared::init_archived_oops();
 421   }
 422 
 423   copy_roots();
 424 
 425   G1CollectedHeap::heap()->end_archive_alloc_range(open_regions,
 426                                                    os::vm_allocation_granularity());
 427 }
 428 
 429 // Copy _pending_roots into an objArray
 430 void HeapShared::copy_roots() {
 431   int length = _pending_roots != NULL ? _pending_roots->length() : 0;
 432   size_t size = objArrayOopDesc::object_size(length);
 433   Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
 434   HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);
 435 
 436   memset(mem, 0, size * BytesPerWord);
 437   {
 438     // This is copied from MemAllocator::finish
 439     oopDesc::set_mark(mem, markWord::prototype());
 440     oopDesc::release_set_klass(mem, k);
 441   }
 442   {
 443     // This is copied from ObjArrayAllocator::initialize
 444     arrayOopDesc::set_length(mem, length);
 445   }
 446 
 447   _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
 448   for (int i = 0; i < length; i++) {
 449     roots()->obj_at_put(i, _pending_roots->at(i));
 450   }
 451   log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem);
 452 }
 453 
 454 void HeapShared::init_narrow_oop_decoding(address base, int shift) {
 455   _narrow_oop_base = base;
 456   _narrow_oop_shift = shift;
 457 }
 458 
 459 //
 460 // Subgraph archiving support
 461 //
 462 HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
 463 HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;
 464 
 465 // Get the subgraph_info for Klass k. A new subgraph_info is created if
 466 // there is no existing one for k. The subgraph_info records the relocated
 467 // Klass* of the original k.
 468 KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
 469   assert(DumpSharedSpaces, "dump time only");
 470   bool created;
 471   Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
 472   KlassSubGraphInfo* info =
 473     _dump_time_subgraph_info_table->put_if_absent(relocated_k, KlassSubGraphInfo(relocated_k, is_full_module_graph),
 474                                                   &created);
 475   assert(created, "must not initialize twice");
 476   return info;
 477 }
 478 
 479 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
 480   assert(DumpSharedSpaces, "dump time only");
 481   Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
 482   KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
 483   assert(info != NULL, "must have been initialized");
 484   return info;
 485 }
 486 
 487 // Add an entry field to the current KlassSubGraphInfo.
 488 void KlassSubGraphInfo::add_subgraph_entry_field(
 489       int static_field_offset, oop v, bool is_closed_archive) {
 490   assert(DumpSharedSpaces, "dump time only");
 491   if (_subgraph_entry_fields == NULL) {
 492     _subgraph_entry_fields =
 493       new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
 494   }
 495   _subgraph_entry_fields->append(static_field_offset);
 496   _subgraph_entry_fields->append(HeapShared::append_root(v));
 497 }
 498 
 499 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
 500 // Only objects of boot classes can be included in the sub-graph.
 501 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
 502   assert(DumpSharedSpaces, "dump time only");
 503   Klass* relocated_k = ArchiveBuilder::get_relocated_klass(orig_k);
 504 
 505   if (_subgraph_object_klasses == NULL) {
 506     _subgraph_object_klasses =
 507       new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
 508   }
 509 
 510   assert(ArchiveBuilder::current()->is_in_buffer_space(relocated_k), "must be a shared class");
 511 
 512   if (_k == relocated_k) {
 513     // Don't add the Klass containing the sub-graph to its own klass
 514     // initialization list.
 515     return;
 516   }
 517 
 518   if (relocated_k->is_instance_klass()) {
 519     assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
 520           "must be boot class");
 521     // vmClasses::xxx_klass() are not updated, need to check
 522     // the original Klass*
 523     if (orig_k == vmClasses::String_klass() ||
 524         orig_k == vmClasses::Object_klass()) {
 525       // Initialized early during VM initialization. No need to be added
 526       // to the sub-graph object class list.
 527       return;
 528     }
 529   } else if (relocated_k->is_objArray_klass()) {
 530     Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
 531     if (abk->is_instance_klass()) {
 532       assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
 533             "must be boot class");
 534     }
 535     if (relocated_k == Universe::objectArrayKlassObj()) {
 536       // Initialized early during Universe::genesis. No need to be added
 537       // to the list.
 538       return;
 539     }
 540   } else {
 541     assert(relocated_k->is_typeArray_klass(), "must be");
 542     // Primitive type arrays are created early during Universe::genesis.
 543     return;
 544   }
 545 
 546   if (log_is_enabled(Debug, cds, heap)) {
 547     if (!_subgraph_object_klasses->contains(relocated_k)) {
 548       ResourceMark rm;
 549       log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
 550     }
 551   }
 552 
 553   _subgraph_object_klasses->append_if_missing(relocated_k);
 554   _has_non_early_klasses |= is_non_early_klass(orig_k);
 555 }
 556 
 557 bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
 558   if (k->is_objArray_klass()) {
 559     k = ObjArrayKlass::cast(k)->bottom_klass();
 560   }
 561   if (k->is_instance_klass()) {
 562     if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
 563       ResourceMark rm;
 564       log_info(cds, heap)("non-early: %s", k->external_name());
 565       return true;
 566     } else {
 567       return false;
 568     }
 569   } else {
 570     return false;
 571   }
 572 }
 573 
 574 // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
 575 void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
 576   _k = info->klass();
 577   _entry_field_records = NULL;
 578   _subgraph_object_klasses = NULL;
 579   _is_full_module_graph = info->is_full_module_graph();
 580 
 581   if (_is_full_module_graph) {
 582     // Consider all classes referenced by the full module graph as early -- we will be
 583     // allocating objects of these classes during JVMTI early phase, so they cannot
 584     // be processed by (non-early) JVMTI ClassFileLoadHook
 585     _has_non_early_klasses = false;
 586   } else {
 587     _has_non_early_klasses = info->has_non_early_klasses();
 588   }
 589 
 590   if (_has_non_early_klasses) {
 591     ResourceMark rm;
 592     log_info(cds, heap)(
 593           "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
 594           _k->external_name());
 595   }
 596 
 597   // populate the entry fields
 598   GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
 599   if (entry_fields != NULL) {
 600     int num_entry_fields = entry_fields->length();
 601     assert(num_entry_fields % 2 == 0, "sanity");
 602     _entry_field_records =
 603       ArchiveBuilder::new_ro_array<int>(num_entry_fields);
 604     for (int i = 0 ; i < num_entry_fields; i++) {
 605       _entry_field_records->at_put(i, entry_fields->at(i));
 606     }
 607   }
 608 
 609   // the Klasses of the objects in the sub-graphs
 610   GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
 611   if (subgraph_object_klasses != NULL) {
 612     int num_subgraphs_klasses = subgraph_object_klasses->length();
 613     _subgraph_object_klasses =
 614       ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
 615     for (int i = 0; i < num_subgraphs_klasses; i++) {
 616       Klass* subgraph_k = subgraph_object_klasses->at(i);
 617       if (log_is_enabled(Info, cds, heap)) {
 618         ResourceMark rm;
 619         log_info(cds, heap)(
 620           "Archived object klass %s (%2d) => %s",
 621           _k->external_name(), i, subgraph_k->external_name());
 622       }
 623       _subgraph_object_klasses->at_put(i, subgraph_k);
 624       ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
 625     }
 626   }
 627 
 628   ArchivePtrMarker::mark_pointer(&_k);
 629   ArchivePtrMarker::mark_pointer(&_entry_field_records);
 630   ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
 631 }
 632 
 633 struct CopyKlassSubGraphInfoToArchive : StackObj {
 634   CompactHashtableWriter* _writer;
 635   CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
 636 
 637   bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
 638     if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
 639       ArchivedKlassSubGraphInfoRecord* record =
 640         (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
 641       record->init(&info);
 642 
 643       unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)klass);
 644       u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
 645       _writer->add(hash, delta);
 646     }
 647     return true; // keep on iterating
 648   }
 649 };
 650 
 651 // Build the records of archived subgraph infos, which include:
 652 // - Entry points to all subgraphs from the containing class mirror. The entry
 653 //   points are static fields in the mirror. For each entry point, the field
 654 //   offset, value and is_closed_archive flag are recorded in the sub-graph
 655 //   info. The value is stored back to the corresponding field at runtime.
 656 // - A list of klasses that need to be loaded/initialized before the archived
 657 //   java object sub-graph can be accessed at runtime.
 658 void HeapShared::write_subgraph_info_table() {
 659   // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
 660   DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
 661   CompactHashtableStats stats;
 662 
 663   _run_time_subgraph_info_table.reset();
 664 
 665   CompactHashtableWriter writer(d_table->_count, &stats);
 666   CopyKlassSubGraphInfoToArchive copy(&writer);
 667   d_table->iterate(&copy);
 668 
 669   writer.dump(&_run_time_subgraph_info_table, "subgraphs");
 670 }
 671 
 672 void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
 673   _run_time_subgraph_info_table.serialize_header(soc);
 674 }
 675 
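     // With -XX:VerifyArchivedFields=1, verify the heap before and after the archived
     // fields of each subgraph are restored. With -XX:VerifyArchivedFields=2,
     // additionally trigger a GC (once VM initialization has completed) to stress-test
     // GC safety of the partially restored heap.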
 676 static void verify_the_heap(Klass* k, const char* which) {
 677   if (VerifyArchivedFields > 0) {
 678     ResourceMark rm;
 679     log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
 680                         which, k->external_name());
 681 
 682     VM_Verify verify_op;
 683     VMThread::execute(&verify_op);
 684 
 685     if (VerifyArchivedFields > 1 && is_init_completed()) {
 686       // At this time, the oop->klass() of some archived objects in the heap may not
 687       // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
 688       // have enough information (object size, oop maps, etc) so that a GC can be safely
 689       // performed.
 690       //
 691       // -XX:VerifyArchivedFields=2 forces a GC to happen in such an early stage
 692       // to check for GC safety.
 693       log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
 694                           which, k->external_name());
 695       FlagSetting fs1(VerifyBeforeGC, true);
 696       FlagSetting fs2(VerifyDuringGC, true);
 697       FlagSetting fs3(VerifyAfterGC,  true);
 698       Universe::heap()->collect(GCCause::_java_lang_system_gc);
 699     }
 700   }
 701 }
 702 
 703 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
 704 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
 705 //
 706 // Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
 707 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
 708 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
 709 void HeapShared::resolve_classes(JavaThread* THREAD) {
 710   if (!is_fully_available()) {
 711     return; // nothing to do
 712   }
 713   resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields,
 714                                 num_closed_archive_subgraph_entry_fields,
 715                                 THREAD);
 716   resolve_classes_for_subgraphs(open_archive_subgraph_entry_fields,
 717                                 num_open_archive_subgraph_entry_fields,
 718                                 THREAD);
 719   resolve_classes_for_subgraphs(fmg_open_archive_subgraph_entry_fields,
 720                                 num_fmg_open_archive_subgraph_entry_fields,
 721                                 THREAD);
 722 }
 723 
 724 void HeapShared::resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[],
 725                                                int num, JavaThread* THREAD) {
 726   for (int i = 0; i < num; i++) {
 727     ArchivableStaticFieldInfo* info = &fields[i];
 728     TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
 729     InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
 730     assert(k != NULL && k->is_shared_boot_class(), "sanity");
 731     resolve_classes_for_subgraph_of(k, THREAD);
 732   }
 733 }
 734 
 735 void HeapShared::resolve_classes_for_subgraph_of(Klass* k, JavaThread* THREAD) {
 736   ExceptionMark em(THREAD);
 737   const ArchivedKlassSubGraphInfoRecord* record =
 738    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
 739   if (HAS_PENDING_EXCEPTION) {
 740    CLEAR_PENDING_EXCEPTION;
 741   }
 742   if (record == NULL) {
 743    clear_archived_roots_of(k);
 744   }
 745 }
 746 
 747 void HeapShared::initialize_from_archived_subgraph(Klass* k, JavaThread* THREAD) {
 748   if (!is_fully_available()) {
 749     return; // nothing to do
 750   }
 751 
 752   ExceptionMark em(THREAD);
 753   const ArchivedKlassSubGraphInfoRecord* record =
 754     resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
 755 
 756   if (HAS_PENDING_EXCEPTION) {
 757     CLEAR_PENDING_EXCEPTION;
 758     // None of the field values will be set if there was an exception when initializing the classes.
 759     // The java code will not see any of the archived objects in the
 760     // subgraphs referenced from k in this case.
 761     return;
 762   }
 763 
 764   if (record != NULL) {
 765     init_archived_fields_for(k, record);
 766   }
 767 }
 768 
 769 const ArchivedKlassSubGraphInfoRecord*
 770 HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
 771   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
 772 
 773   if (!k->is_shared()) {
 774     return NULL;
 775   }
 776   unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
 777   const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
 778 
 779   // Initialize from archived data. Currently this is done only
 780   // during VM initialization time. No lock is needed.
 781   if (record != NULL) {
 782     if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
 783       if (log_is_enabled(Info, cds, heap)) {
 784         ResourceMark rm(THREAD);
 785         log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
 786                             k->external_name());
 787       }
 788       return NULL;
 789     }
 790 
 791     if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
 792       if (log_is_enabled(Info, cds, heap)) {
 793         ResourceMark rm(THREAD);
 794         log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
 795                             k->external_name());
 796       }
 797       return NULL;
 798     }
 799 
 800     resolve_or_init(k, do_init, CHECK_NULL);
 801 
 802     // Load/link/initialize the klasses of the objects in the subgraph.
 803     // NULL class loader is used.
 804     Array<Klass*>* klasses = record->subgraph_object_klasses();
 805     if (klasses != NULL) {
 806       for (int i = 0; i < klasses->length(); i++) {
 807         Klass* klass = klasses->at(i);
 808         if (!klass->is_shared()) {
 809           return NULL;
 810         }
 811         resolve_or_init(klass, do_init, CHECK_NULL);
 812       }
 813     }
 814   }
 815 
 816   return record;
 817 }
 818 
 819 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
 820   if (!do_init) {
 821     if (k->class_loader_data() == NULL) {
 822       Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
 823       assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
 824     }
 825   } else {
 826     assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
 827     if (k->is_instance_klass()) {
 828       InstanceKlass* ik = InstanceKlass::cast(k);
 829       ik->initialize(CHECK);
 830     } else if (k->is_objArray_klass()) {
 831       ObjArrayKlass* oak = ObjArrayKlass::cast(k);
 832       oak->initialize(CHECK);
 833     }
 834   }
 835 }
 836 
 837 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
 838   verify_the_heap(k, "before");
 839 
 840   // Load the subgraph entry fields from the record and store them back to
 841   // the corresponding fields within the mirror.
 842   oop m = k->java_mirror();
 843   Array<int>* entry_field_records = record->entry_field_records();
 844   if (entry_field_records != NULL) {
 845     int efr_len = entry_field_records->length();
 846     assert(efr_len % 2 == 0, "sanity");
 847     for (int i = 0; i < efr_len; i += 2) {
 848       int field_offset = entry_field_records->at(i);
 849       int root_index = entry_field_records->at(i+1);
 850       oop v = get_root(root_index, /*clear=*/true);
 851       m->obj_field_put(field_offset, v);
 852       log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
 853     }
 854 
 855     // Done. Java code can see the archived sub-graphs referenced from k's
 856     // mirror after this point.
 857     if (log_is_enabled(Info, cds, heap)) {
 858       ResourceMark rm;
 859       log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s",
 860                           k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "");
 861     }
 862   }
 863 
 864   verify_the_heap(k, "after ");
 865 }
 866 
 867 void HeapShared::clear_archived_roots_of(Klass* k) {
 868   unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
 869   const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
 870   if (record != NULL) {
 871     Array<int>* entry_field_records = record->entry_field_records();
 872     if (entry_field_records != NULL) {
 873       int efr_len = entry_field_records->length();
 874       assert(efr_len % 2 == 0, "sanity");
 875       for (int i = 0; i < efr_len; i += 2) {
 876         int root_index = entry_field_records->at(i+1);
 877         clear_root(root_index);
 878       }
 879     }
 880   }
 881 }
 882 
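     // Closure used by archive_reachable_objects_from(): for each non-null oop field
     // in the original object, recursively archive the referenced object and update
     // the corresponding field in the archived copy to point to the archived referent.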
 883 class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
 884   int _level;
 885   bool _is_closed_archive;
 886   bool _record_klasses_only;
 887   KlassSubGraphInfo* _subgraph_info;
 888   oop _orig_referencing_obj;
 889   oop _archived_referencing_obj;
 890  public:
 891   WalkOopAndArchiveClosure(int level,
 892                            bool is_closed_archive,
 893                            bool record_klasses_only,
 894                            KlassSubGraphInfo* subgraph_info,
 895                            oop orig, oop archived) :
 896     _level(level), _is_closed_archive(is_closed_archive),
 897     _record_klasses_only(record_klasses_only),
 898     _subgraph_info(subgraph_info),
 899     _orig_referencing_obj(orig), _archived_referencing_obj(archived) {}
 900   void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
 901   void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
 902 
 903  protected:
 904   template <class T> void do_oop_work(T *p) {
 905     oop obj = RawAccess<>::oop_load(p);
 906     if (!CompressedOops::is_null(obj)) {
 907       assert(!HeapShared::is_archived_object_during_dumptime(obj),
 908              "original objects must not point to archived objects");
 909 
 910       size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
 911       T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);
 912 
 913       if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
 914         ResourceMark rm;
 915         log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level,
 916                              _orig_referencing_obj->klass()->external_name(), field_delta,
 917                              p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
 918         LogTarget(Trace, cds, heap) log;
 919         LogStream out(log);
 920         obj->print_on(&out);
 921       }
 922 
 923       oop archived = HeapShared::archive_reachable_objects_from(
 924           _level + 1, _subgraph_info, obj, _is_closed_archive);
 925       assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
 926       assert(HeapShared::is_archived_object_during_dumptime(archived), "must be");
 927 
 928       if (!_record_klasses_only) {
 929         // Update the reference in the archived copy of the referencing object.
 930         log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
 931                              _level, p2i(new_p), p2i(obj), p2i(archived));
 932         RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
 933       }
 934     }
 935   }
 936 };
 937 
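     // Objects in the closed archive region should not have reference fields that are
     // assigned at run time (see closed_archive_subgraph_entry_fields above), so warn
     // about any non-final reference field found in instances of k.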
 938 void HeapShared::check_closed_region_object(InstanceKlass* k) {
 939   // Check fields in the object
 940   for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
 941     if (!fs.access_flags().is_static()) {
 942       BasicType ft = fs.field_descriptor().field_type();
 943       if (!fs.access_flags().is_final() && is_reference_type(ft)) {
 944         ResourceMark rm;
 945         log_warning(cds, heap)(
 946           "Please check reference field in %s instance in closed archive heap region: %s %s",
 947           k->external_name(), (fs.name())->as_C_string(),
 948           (fs.signature())->as_C_string());
 949       }
 950     }
 951   }
 952 }
 953 
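     // Sanity checks for an archived java.lang.Module object: it must either be one of
     // the special unnamed modules that have no ModuleEntry, or be defined by a builtin
     // class loader.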
 954 void HeapShared::check_module_oop(oop orig_module_obj) {
 955   assert(DumpSharedSpaces, "must be");
 956   assert(java_lang_Module::is_instance(orig_module_obj), "must be");
 957   ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj);
 958   if (orig_module_ent == NULL) {
 959     // These special Module objects are created in Java code. They are not
 960     // defined via Modules::define_module(), so they don't have a ModuleEntry:
 961     //     java.lang.Module::ALL_UNNAMED_MODULE
 962     //     java.lang.Module::EVERYONE_MODULE
 963     //     jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule
 964     assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed");
 965     log_info(cds, heap)("Module oop with No ModuleEntry* @[" PTR_FORMAT "]", p2i(orig_module_obj));
 966   } else {
 967     ClassLoaderData* loader_data = orig_module_ent->loader_data();
 968     assert(loader_data->is_builtin_class_loader_data(), "must be");
 969   }
 970 }
 971 
 972 
 973 // (1) If orig_obj has not been archived yet, archive it.
 974 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
 975 //     trace all objects that are reachable from it, and make sure these objects are archived.
 976 // (3) Record the klasses of orig_obj and of all reachable objects.
 977 oop HeapShared::archive_reachable_objects_from(int level,
 978                                                KlassSubGraphInfo* subgraph_info,
 979                                                oop orig_obj,
 980                                                bool is_closed_archive) {
 981   assert(orig_obj != NULL, "must be");
 982   assert(!is_archived_object_during_dumptime(orig_obj), "sanity");
 983 
 984   if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
 985     // This object has injected fields that cannot be supported easily, so we disallow them for now.
 986     // If you get an error here, you probably made a change in the JDK library that has added
 987     // these objects that are referenced (directly or indirectly) by static fields.
 988     ResourceMark rm;
 989     log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
 990     vm_direct_exit(1);
 991   }
 992 
 993   // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
 994   // them as Klass::_archived_mirror because they need to be specially restored at run time.
 995   //
 996   // If you get an error here, you probably made a change in the JDK library that has added a Class
 997   // object that is referenced (directly or indirectly) by static fields.
 998   if (java_lang_Class::is_instance(orig_obj)) {
 999     log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
1000     vm_direct_exit(1);
1001   }
1002 
1003   oop archived_obj = find_archived_heap_object(orig_obj);
1004   if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
1005     // To save time, don't walk strings that are already archived. They just contain
1006     // pointers to a type array, whose klass doesn't need to be recorded.
1007     return archived_obj;
1008   }
1009 
1010   if (has_been_seen_during_subgraph_recording(orig_obj)) {
1011     // orig_obj has already been archived and traced. Nothing more to do.
1012     return archived_obj;
1013   } else {
1014     set_has_been_seen_during_subgraph_recording(orig_obj);
1015   }
1016 
1017   bool record_klasses_only = (archived_obj != NULL);
1018   if (archived_obj == NULL) {
1019     ++_num_new_archived_objs;
1020     archived_obj = archive_object(orig_obj);
1021     if (archived_obj == NULL) {
1022       // Skip archiving the sub-graph referenced from the current entry field.
1023       ResourceMark rm;
1024       log_error(cds, heap)(
1025         "Cannot archive the sub-graph referenced from %s object ("
1026         PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
1027         orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1028       if (level == 1) {
1029         // Don't archive a subgraph root that's too big. For archived static fields, that's OK
1030         // as the Java code will take care of initializing this field dynamically.
1031         return NULL;
1032       } else {
1033         // We don't know how to handle an object that has been archived, but some of its reachable
1034         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1035         // we have a real use case.
1036         vm_direct_exit(1);
1037       }
1038     }
1039 
1040     if (java_lang_Module::is_instance(orig_obj)) {
1041       check_module_oop(orig_obj);
1042       java_lang_Module::set_module_entry(archived_obj, NULL);
1043       java_lang_Module::set_loader(archived_obj, NULL);
1044     } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
1045       // class_data will be restored explicitly at run time.
1046       guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
1047                 orig_obj == SystemDictionary::java_system_loader() ||
1048                 java_lang_ClassLoader::loader_data(orig_obj) == NULL, "must be");
1049       java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
1050     }
1051   }
1052 
1053   assert(archived_obj != NULL, "must be");
1054   Klass *orig_k = orig_obj->klass();
1055   subgraph_info->add_subgraph_object_klass(orig_k);
1056 
1057   WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
1058                                   subgraph_info, orig_obj, archived_obj);
1059   orig_obj->oop_iterate(&walker);
1060   if (is_closed_archive && orig_k->is_instance_klass()) {
1061     check_closed_region_object(InstanceKlass::cast(orig_k));
1062   }
1063   return archived_obj;
1064 }
1065 
1066 //
1067 // Start from the given static field in a java mirror and archive the
1068 // complete sub-graph of java heap objects that are reached directly
1069 // or indirectly from the starting object by following references.
1070 // Sub-graph archiving restrictions (current):
1071 //
1072 // - All classes of objects in the archived sub-graph (including the
1073 //   entry class) must be boot classes only.
1074 // - No java.lang.Class instance (java mirror) can be included inside
1075 //   an archived sub-graph. A mirror can only be the sub-graph entry object.
1076 //
1077 // The Java heap object sub-graph archiving process (see
1078 // WalkOopAndArchiveClosure):
1079 //
1080 // 1) Java object sub-graph archiving starts from a given static field
1081 // within a Class instance (java mirror). If the static field is a
1082 // reference field and points to a non-null java object, proceed to
1083 // the next step.
1084 //
1085 // 2) Archives the referenced java object. If an archived copy of the
1086 // current object already exists, updates the pointer in the archived
1087 // copy of the referencing object to point to the current archived object.
1088 // Otherwise, proceed to the next step.
1089 //
1090 // 3) Follows all references within the current java object and recursively
1091 // archives the sub-graph of objects starting from each reference.
1092 //
1093 // 4) Updates the pointer in the archived copy of referencing object to
1094 // point to the current archived object.
1095 //
1096 // 5) The Klass of the current java object is added to the list of Klasses
1097 // for loading and initializing before any object in the archived graph can
1098 // be accessed at runtime.
1099 //
1100 void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
1101                                                              const char* klass_name,
1102                                                              int field_offset,
1103                                                              const char* field_name,
1104                                                              bool is_closed_archive) {
1105   assert(DumpSharedSpaces, "dump time only");
1106   assert(k->is_shared_boot_class(), "must be boot class");
1107 
1108   oop m = k->java_mirror();
1109 
1110   KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
1111   oop f = m->obj_field(field_offset);
1112 
1113   log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));
1114 
1115   if (!CompressedOops::is_null(f)) {
1116     if (log_is_enabled(Trace, cds, heap)) {
1117       LogTarget(Trace, cds, heap) log;
1118       LogStream out(log);
1119       f->print_on(&out);
1120     }
1121 
1122     oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);
1123 
1124     if (af == NULL) {
1125       log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
1126                            klass_name, field_name);
1127     } else {
1128       // Note: the field value is not preserved in the archived mirror.
1129       // Record the field as a new subGraph entry point. The recorded
1130       // information is restored from the archive at runtime.
1131       subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
1132       log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
1133     }
1134   } else {
1135     // The field contains null, we still need to record the entry point,
1136     // so it can be restored at runtime.
1137     subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
1138   }
1139 }
1140 
1141 #ifndef PRODUCT
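     // Verification helpers (non-product builds only): check that every object reachable
     // from an original (non-archived) object has an archived copy, or that an archived
     // object points only to other archived objects.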
1142 class VerifySharedOopClosure: public BasicOopIterateClosure {
1143  private:
1144   bool _is_archived;
1145 
1146  public:
1147   VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}
1148 
1149   void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
1150   void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }
1151 
1152  protected:
1153   template <class T> void do_oop_work(T *p) {
1154     oop obj = RawAccess<>::oop_load(p);
1155     if (!CompressedOops::is_null(obj)) {
1156       HeapShared::verify_reachable_objects_from(obj, _is_archived);
1157     }
1158   }
1159 };
1160 
1161 void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
1162   assert(DumpSharedSpaces, "dump time only");
1163   assert(k->is_shared_boot_class(), "must be boot class");
1164 
1165   oop m = k->java_mirror();
1166   oop f = m->obj_field(field_offset);
1167   if (!CompressedOops::is_null(f)) {
1168     verify_subgraph_from(f);
1169   }
1170 }
1171 
1172 void HeapShared::verify_subgraph_from(oop orig_obj) {
1173   oop archived_obj = find_archived_heap_object(orig_obj);
1174   if (archived_obj == NULL) {
1175     // It's OK for the root of a subgraph to be not archived. See comments in
1176     // archive_reachable_objects_from().
1177     return;
1178   }
1179 
1180   // Verify that all objects reachable from orig_obj are archived.
1181   init_seen_objects_table();
1182   verify_reachable_objects_from(orig_obj, false);
1183   delete_seen_objects_table();
1184 
1185   // Note: we could also verify that all objects reachable from the archived
1186   // copy of orig_obj can only point to archived objects, with:
1187   //      init_seen_objects_table();
1188   //      verify_reachable_objects_from(archived_obj, true);
1189   //      delete_seen_objects_table();
1190   // but that's already done in G1HeapVerifier::verify_archive_regions so we
1191   // won't do it here.
1192 }
1193 
1194 void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
1195   _num_total_verifications ++;
1196   if (!has_been_seen_during_subgraph_recording(obj)) {
1197     set_has_been_seen_during_subgraph_recording(obj);
1198 
1199     if (is_archived) {
1200       assert(is_archived_object_during_dumptime(obj), "must be");
1201       assert(find_archived_heap_object(obj) == NULL, "must be");
1202     } else {
1203       assert(!is_archived_object_during_dumptime(obj), "must be");
1204       assert(find_archived_heap_object(obj) != NULL, "must be");
1205     }
1206 
1207     VerifySharedOopClosure walker(is_archived);
1208     obj->oop_iterate(&walker);
1209   }
1210 }
1211 #endif
1212 
1213 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
1214 int HeapShared::_num_new_walked_objs;
1215 int HeapShared::_num_new_archived_objs;
1216 int HeapShared::_num_old_recorded_klasses;
1217 
1218 int HeapShared::_num_total_subgraph_recordings = 0;
1219 int HeapShared::_num_total_walked_objs = 0;
1220 int HeapShared::_num_total_archived_objs = 0;
1221 int HeapShared::_num_total_recorded_klasses = 0;
1222 int HeapShared::_num_total_verifications = 0;
1223 
1224 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
1225   return _seen_objects_table->get(obj) != NULL;
1226 }
1227 
1228 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
1229   assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
1230   _seen_objects_table->put(obj, true);
1231   ++ _num_new_walked_objs;
1232 }
1233 
1234 void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
1235   log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
1236   init_subgraph_info(k, is_full_module_graph);
1237   init_seen_objects_table();
1238   _num_new_walked_objs = 0;
1239   _num_new_archived_objs = 0;
1240   _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
1241 }
1242 
1243 void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
1244   int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
1245     _num_old_recorded_klasses;
1246   log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
1247                       "walked %d objs, archived %d new objs, recorded %d classes",
1248                       class_name, _num_new_walked_objs, _num_new_archived_objs,
1249                       num_new_recorded_klasses);
1250 
1251   delete_seen_objects_table();
1252 
1253   _num_total_subgraph_recordings ++;
1254   _num_total_walked_objs      += _num_new_walked_objs;
1255   _num_total_archived_objs    += _num_new_archived_objs;
1256   _num_total_recorded_klasses +=  num_new_recorded_klasses;
1257 }
1258 
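     // FieldClosure that finds the offset of a named static reference field in an
     // InstanceKlass. Used to fill in the klass and offset of each subgraph entry field.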
1259 class ArchivableStaticFieldFinder: public FieldClosure {
1260   InstanceKlass* _ik;
1261   Symbol* _field_name;
1262   bool _found;
1263   int _offset;
1264 public:
1265   ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
1266     _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}
1267 
1268   virtual void do_field(fieldDescriptor* fd) {
1269     if (fd->name() == _field_name) {
1270       assert(!_found, "fields cannot be overloaded");
1271       assert(is_reference_type(fd->field_type()), "can archive only fields that are references");
1272       _found = true;
1273       _offset = fd->offset();
1274     }
1275   }
1276   bool found()     { return _found;  }
1277   int offset()     { return _offset; }
1278 };
1279 
1280 void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
1281                                             int num, TRAPS) {
1282   for (int i = 0; i < num; i++) {
1283     ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);
1286 
1287     Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, CHECK);
1288     InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(),
           "Only support boot classes");
1291     ik->initialize(CHECK);
1292 
1293     ArchivableStaticFieldFinder finder(ik, field_name);
1294     ik->do_local_static_fields(&finder);
1295     assert(finder.found(), "field must exist");
1296 
1297     info->klass = ik;
1298     info->offset = finder.offset();
1299   }
1300 }
1301 
1302 void HeapShared::init_subgraph_entry_fields(TRAPS) {
1303   assert(HeapShared::can_write(), "must be");
1304   _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();
1305   init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
1306                              num_closed_archive_subgraph_entry_fields,
1307                              CHECK);
1308   init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
1309                              num_open_archive_subgraph_entry_fields,
1310                              CHECK);
1311   if (MetaspaceShared::use_full_module_graph()) {
1312     init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields,
1313                                num_fmg_open_archive_subgraph_entry_fields,
1314                                CHECK);
1315   }
1316 }
1317 
1318 void HeapShared::init_for_dumping(TRAPS) {
1319   if (HeapShared::can_write()) {
1320     _dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
1321     init_subgraph_entry_fields(CHECK);
1322   }
1323 }
1324 
1325 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
1326                                           int num, bool is_closed_archive,
1327                                           bool is_full_module_graph) {
1328   _num_total_subgraph_recordings = 0;
1329   _num_total_walked_objs = 0;
1330   _num_total_archived_objs = 0;
1331   _num_total_recorded_klasses = 0;
1332   _num_total_verifications = 0;
1333 
1334   // For each class X that has one or more archived fields:
1335   // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
1337   //     by any of these static fields.
1338   //     At runtime, these classes are initialized before X's archived fields
1339   //     are restored by HeapShared::initialize_from_archived_subgraph().
1340   int i;
1341   for (i = 0; i < num; ) {
1342     ArchivableStaticFieldInfo* info = &fields[i];
1343     const char* klass_name = info->klass_name;
1344     start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
1345 
    // If consecutive fields of the same klass are specified in fields[], they are
    // archived in the same {start_recording_subgraph ... done_recording_subgraph}
    // pass to save time; see the illustrative example below.
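    // For example (hypothetical field names, for illustration only): if fields[]
    // listed { "java/lang/Integer$IntegerCache", "archivedCache" } followed by
    // { "java/lang/Integer$IntegerCache", "anotherField" }, both fields would be
    // recorded in a single pass for Integer$IntegerCache.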
1350     for (; i < num; i++) {
1351       ArchivableStaticFieldInfo* f = &fields[i];
1352       if (f->klass_name != klass_name) {
1353         break;
1354       }
1355 
1356       archive_reachable_objects_from_static_field(f->klass, f->klass_name,
1357                                                   f->offset, f->field_name,
1358                                                   is_closed_archive);
1359     }
1360     done_recording_subgraph(info->klass, klass_name);
1361   }
1362 
1363   log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
1364                       is_closed_archive ? "closed" : "open",
1365                       _num_total_subgraph_recordings);
1366   log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
1367   log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
1368   log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);
1369 
1370 #ifndef PRODUCT
1371   for (int i = 0; i < num; i++) {
1372     ArchivableStaticFieldInfo* f = &fields[i];
1373     verify_subgraph_from_static_field(f->klass, f->offset);
1374   }
1375   log_info(cds, heap)("  Verified %d references", _num_total_verifications);
1376 #endif
1377 }
1378 
1379 // Not all the strings in the global StringTable are dumped into the archive, because
// some of those strings may be referenced only by classes that are excluded from
1381 // the archive. We need to explicitly mark the strings that are:
1382 //   [1] used by classes that WILL be archived;
1383 //   [2] included in the SharedArchiveConfigFile.
1384 void HeapShared::add_to_dumped_interned_strings(oop string) {
1385   assert_at_safepoint(); // DumpedInternedStrings uses raw oops
1386   bool created;
1387   _dumped_interned_strings->put_if_absent(string, true, &created);
1388 }
1389 
1390 // At dump-time, find the location of all the non-null oop pointers in an archived heap
1391 // region. This way we can quickly relocate all the pointers without using
1392 // BasicOopIterateClosure at runtime.
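// Bit i of the resulting oopmap corresponds to the narrowOop slot at
// ((narrowOop*)region.start()) + i; a bit is set only for slots that hold a
// non-null oop, so runtime patching can skip the null slots entirely.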
1393 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
1394   narrowOop* _start;
1395   BitMap *_oopmap;
1396   int _num_total_oops;
1397   int _num_null_oops;
1398  public:
1399   FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}
1401 
1402   virtual void do_oop(narrowOop* p) {
1403     _num_total_oops ++;
1404     narrowOop v = *p;
1405     if (!CompressedOops::is_null(v)) {
1406       size_t idx = p - _start;
1407       _oopmap->set_bit(idx);
1408     } else {
1409       _num_null_oops ++;
1410     }
1411   }
1412   virtual void do_oop(oop *p) {
1413     ShouldNotReachHere();
1414   }
1415   int num_total_oops() const { return _num_total_oops; }
1416   int num_null_oops()  const { return _num_null_oops; }
1417 };
1418 
1419 ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
1420   assert(UseCompressedOops, "must be");
1421   size_t num_bits = region.byte_size() / sizeof(narrowOop);
1422   ResourceBitMap oopmap(num_bits);
1423 
1424   HeapWord* p   = region.start();
1425   HeapWord* end = region.end();
1426   FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);
1427   ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL;
1428 
1429   int num_objs = 0;
1430   while (p < end) {
1431     oop o = cast_to_oop(p);
1432     o->oop_iterate(&finder);
1433     p += o->size();
1434     if (DumpSharedSpaces) {
1435       builder->relocate_klass_ptr(o);
1436     }
1437     ++ num_objs;
1438   }
1439 
1440   log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
1441                       num_objs, finder.num_total_oops(), finder.num_null_oops());
1442   return oopmap;
1443 }
1444 
1445 // Patch all the embedded oop pointers inside an archived heap region,
1446 // to be consistent with the runtime oop encoding.
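// The bitmap consumed here is the one produced by calculate_oopmap() at dump time:
// each set bit identifies a non-null narrowOop slot, relative to the region start,
// whose value is decoded with the dump-time encoding and re-stored with the
// runtime encoding.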
1447 class PatchEmbeddedPointers: public BitMapClosure {
1448   narrowOop* _start;
1449 
1450  public:
1451   PatchEmbeddedPointers(narrowOop* start) : _start(start) {}
1452 
1453   bool do_bit(size_t offset) {
1454     narrowOop* p = _start + offset;
1455     narrowOop v = *p;
1456     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
1457     oop o = HeapShared::decode_from_archive(v);
1458     RawAccess<IS_NOT_NULL>::oop_store(p, o);
1459     return true;
1460   }
1461 };
1462 
1463 // Patch all the non-null pointers that are embedded in the archived heap objects
1464 // in this region
1465 void HeapShared::patch_embedded_pointers(MemRegion region, address oopmap,
1466                                          size_t oopmap_size_in_bits) {
1467   BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
1468 
1469 #ifndef PRODUCT
1470   ResourceMark rm;
1471   ResourceBitMap checkBm = calculate_oopmap(region);
1472   assert(bm.is_same(checkBm), "sanity");
1473 #endif
1474 
1475   PatchEmbeddedPointers patcher((narrowOop*)region.start());
1476   bm.iterate(&patcher);
1477 }
1478 
1479 // The CDS archive remembers each heap object by its address at dump time, but
1480 // the heap object may be loaded at a different address at run time. This structure is used
1481 // to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
1482 // to their runtime addresses.
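// For example (illustrative addresses only): if the first object of a region had
// dump-time address 0x00000007ff000000 and the region is loaded at runtime address
// 0x00000006f0000000, _runtime_offset is the difference between the two, and every
// object in that region is found at its dump-time address plus that offset.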
1483 struct LoadedArchiveHeapRegion {
1484   int       _region_index;   // index for FileMapInfo::space_at(index)
1485   size_t    _region_size;    // number of bytes in this region
1486   uintptr_t _dumptime_base;  // The dump-time (decoded) address of the first object in this region
  intx      _runtime_offset; // If an object's dump time address P is within this region, its
1488                              // runtime address is P + _runtime_offset
1489 
1490   static int comparator(const void* a, const void* b) {
1491     LoadedArchiveHeapRegion* reg_a = (LoadedArchiveHeapRegion*)a;
1492     LoadedArchiveHeapRegion* reg_b = (LoadedArchiveHeapRegion*)b;
1493     if (reg_a->_dumptime_base < reg_b->_dumptime_base) {
1494       return -1;
1495     } else if (reg_a->_dumptime_base == reg_b->_dumptime_base) {
1496       return 0;
1497     } else {
1498       return 1;
1499     }
1500   }
1501 
1502   uintptr_t top() {
1503     return _dumptime_base + _region_size;
1504   }
1505 };
1506 
1507 void HeapShared::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_regions,
1508                                              int num_loaded_regions) {
1509   _dumptime_base_0 = loaded_regions[0]._dumptime_base;
1510   _dumptime_base_1 = loaded_regions[1]._dumptime_base;
1511   _dumptime_base_2 = loaded_regions[2]._dumptime_base;
1512   _dumptime_base_3 = loaded_regions[3]._dumptime_base;
1513   _dumptime_top = loaded_regions[num_loaded_regions-1].top();
1514 
1515   _runtime_offset_0 = loaded_regions[0]._runtime_offset;
1516   _runtime_offset_1 = loaded_regions[1]._runtime_offset;
1517   _runtime_offset_2 = loaded_regions[2]._runtime_offset;
1518   _runtime_offset_3 = loaded_regions[3]._runtime_offset;
1519 
1520   assert(2 <= num_loaded_regions && num_loaded_regions <= 4, "must be");
1521   if (num_loaded_regions < 4) {
1522     _dumptime_base_3 = UINTPTR_MAX;
1523   }
1524   if (num_loaded_regions < 3) {
1525     _dumptime_base_2 = UINTPTR_MAX;
1526   }
1527 }
1528 
1529 bool HeapShared::can_load() {
1530   return Universe::heap()->can_load_archived_objects();
1531 }
1532 
1533 template <int NUM_LOADED_REGIONS>
1534 class PatchLoadedRegionPointers: public BitMapClosure {
1535   narrowOop* _start;
1536   intx _offset_0;
1537   intx _offset_1;
1538   intx _offset_2;
1539   intx _offset_3;
1540   uintptr_t _base_0;
1541   uintptr_t _base_1;
1542   uintptr_t _base_2;
1543   uintptr_t _base_3;
1544   uintptr_t _top;
1545 
1546   static_assert(MetaspaceShared::max_num_heap_regions == 4, "can't handle more than 4 regions");
1547   static_assert(NUM_LOADED_REGIONS >= 2, "we have at least 2 loaded regions");
1548   static_assert(NUM_LOADED_REGIONS <= 4, "we have at most 4 loaded regions");
1549 
1550  public:
1551   PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_regions)
1552     : _start(start),
1553       _offset_0(loaded_regions[0]._runtime_offset),
1554       _offset_1(loaded_regions[1]._runtime_offset),
1555       _offset_2(loaded_regions[2]._runtime_offset),
1556       _offset_3(loaded_regions[3]._runtime_offset),
1557       _base_0(loaded_regions[0]._dumptime_base),
1558       _base_1(loaded_regions[1]._dumptime_base),
1559       _base_2(loaded_regions[2]._dumptime_base),
1560       _base_3(loaded_regions[3]._dumptime_base) {
1561     _top = loaded_regions[NUM_LOADED_REGIONS-1].top();
1562   }
1563 
1564   bool do_bit(size_t offset) {
1565     narrowOop* p = _start + offset;
1566     narrowOop v = *p;
1567     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
1568     uintptr_t o = cast_from_oop<uintptr_t>(HeapShared::decode_from_archive(v));
    assert(_base_0 <= o && o < _top, "must be");

    // We usually have only 2 regions for the default archive. Use a template to
    // avoid unnecessary comparisons.
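    // Because NUM_LOADED_REGIONS is a compile-time constant, the guarded comparisons
    // below can be resolved at compile time, so the unused branches are typically
    // optimized away.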
1573     if (NUM_LOADED_REGIONS > 3 && o >= _base_3) {
1574       o += _offset_3;
1575     } else if (NUM_LOADED_REGIONS > 2 && o >= _base_2) {
1576       o += _offset_2;
1577     } else if (o >= _base_1) {
1578       o += _offset_1;
1579     } else {
1580       o += _offset_0;
1581     }
1582     HeapShared::assert_in_loaded_heap(o);
1583     RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
1584     return true;
1585   }
1586 };
1587 
1588 int HeapShared::init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
1589                                     MemRegion& archive_space) {
1590   size_t total_bytes = 0;
1591   int num_loaded_regions = 0;
1592   for (int i = MetaspaceShared::first_archive_heap_region;
1593        i <= MetaspaceShared::last_archive_heap_region; i++) {
1594     FileMapRegion* r = mapinfo->space_at(i);
1595     r->assert_is_heap_region();
1596     if (r->used() > 0) {
1597       assert(is_aligned(r->used(), HeapWordSize), "must be");
1598       total_bytes += r->used();
1599       LoadedArchiveHeapRegion* ri = &loaded_regions[num_loaded_regions++];
1600       ri->_region_index = i;
1601       ri->_region_size = r->used();
1602       ri->_dumptime_base = (uintptr_t)mapinfo->start_address_as_decoded_from_archive(r);
1603     }
1604   }
1605 
1606   assert(is_aligned(total_bytes, HeapWordSize), "must be");
1607   size_t word_size = total_bytes / HeapWordSize;
1608   HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
1609   if (buffer == nullptr) {
1610     return 0;
1611   }
1612 
1613   archive_space = MemRegion(buffer, word_size);
1614   _loaded_heap_bottom = (uintptr_t)archive_space.start();
1615   _loaded_heap_top    = _loaded_heap_bottom + total_bytes;
1616 
1617   return num_loaded_regions;
1618 }
1619 
1620 void HeapShared::sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions,
1621                                      uintptr_t buffer) {
1622   // Find the relocation offset of the pointers in each region
1623   qsort(loaded_regions, num_loaded_regions, sizeof(LoadedArchiveHeapRegion),
1624         LoadedArchiveHeapRegion::comparator);
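  // After sorting by _dumptime_base, region i is assigned the slice of the buffer
  // that starts where region i-1 ends, so the regions keep their dump-time address
  // order within the contiguous loaded space.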
1625 
1626   uintptr_t p = buffer;
1627   for (int i = 0; i < num_loaded_regions; i++) {
1628     // This region will be loaded at p, so all objects inside this
1629     // region will be shifted by ri->offset
1630     LoadedArchiveHeapRegion* ri = &loaded_regions[i];
1631     ri->_runtime_offset = p - ri->_dumptime_base;
1632     p += ri->_region_size;
1633   }
1634   assert(p == _loaded_heap_top, "must be");
1635 }
1636 
1637 bool HeapShared::load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
1638                               int num_loaded_regions, uintptr_t buffer) {
1639   uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
1640   uintptr_t load_address = buffer;
1641   for (int i = 0; i < num_loaded_regions; i++) {
1642     LoadedArchiveHeapRegion* ri = &loaded_regions[i];
1643     FileMapRegion* r = mapinfo->space_at(ri->_region_index);
1644 
1645     if (!mapinfo->read_region(ri->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
      // There's no easy way to free the buffer, so we will fill it with filler objects
      // later in fill_failed_loaded_region(), and it will eventually be GC'ed.
1648       log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", i);
1649       _loading_failed = true;
1650       return false;
1651     }
1652     log_info(cds)("Loaded heap    region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
1653                   " size " SIZE_FORMAT_W(6) " delta " INTX_FORMAT,
1654                   ri->_region_index, load_address, load_address + ri->_region_size,
1655                   ri->_region_size, ri->_runtime_offset);
1656 
1657     uintptr_t oopmap = bitmap_base + r->oopmap_offset();
1658     BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());
1659 
1660     if (num_loaded_regions == 4) {
1661       PatchLoadedRegionPointers<4> patcher((narrowOop*)load_address, loaded_regions);
1662       bm.iterate(&patcher);
1663     } else if (num_loaded_regions == 3) {
1664       PatchLoadedRegionPointers<3> patcher((narrowOop*)load_address, loaded_regions);
1665       bm.iterate(&patcher);
1666     } else {
1667       assert(num_loaded_regions == 2, "must be");
1668       PatchLoadedRegionPointers<2> patcher((narrowOop*)load_address, loaded_regions);
1669       bm.iterate(&patcher);
1670     }
1671 
1672     load_address += r->used();
1673   }
1674 
1675   return true;
1676 }
1677 
1678 bool HeapShared::load_heap_regions(FileMapInfo* mapinfo) {
1679   init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());
1680 
1681   LoadedArchiveHeapRegion loaded_regions[MetaspaceShared::max_num_heap_regions];
1682   memset(loaded_regions, 0, sizeof(loaded_regions));
1683 
1684   MemRegion archive_space;
1685   int num_loaded_regions = init_loaded_regions(mapinfo, loaded_regions, archive_space);
1686   if (num_loaded_regions <= 0) {
1687     return false;
1688   }
1689   sort_loaded_regions(loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start());
1690   if (!load_regions(mapinfo, loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start())) {
1691     assert(_loading_failed, "must be");
1692     return false;
1693   }
1694 
1695   init_loaded_heap_relocation(loaded_regions, num_loaded_regions);
1696   _is_loaded = true;
1697   set_roots(mapinfo->heap_obj_roots());
1698 
1699   return true;
1700 }
1701 
1702 class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
1703   ResourceHashtable<uintptr_t, bool>* _table;
1704 
1705  public:
1706   VerifyLoadedHeapEmbeddedPointers(ResourceHashtable<uintptr_t, bool>* table) : _table(table) {}
1707 
1708   virtual void do_oop(narrowOop* p) {
1709     // This should be called before the loaded regions are modified, so all the embedded pointers
1710     // must be NULL, or must point to a valid object in the loaded regions.
1711     narrowOop v = *p;
1712     if (!CompressedOops::is_null(v)) {
1713       oop o = CompressedOops::decode_not_null(v);
1714       uintptr_t u = cast_from_oop<uintptr_t>(o);
1715       HeapShared::assert_in_loaded_heap(u);
1716       guarantee(_table->contains(u), "must point to beginning of object in loaded archived regions");
1717     }
1718   }
1719   virtual void do_oop(oop* p) {
1720     ShouldNotReachHere();
1721   }
1722 };
1723 
1724 void HeapShared::finish_initialization() {
1725   if (is_loaded()) {
1726     HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
1727     HeapWord* top    = (HeapWord*)_loaded_heap_top;
1728 
1729     MemRegion archive_space = MemRegion(bottom, top);
1730     Universe::heap()->complete_loaded_archive_space(archive_space);
1731   }
1732 
1733   if (VerifyArchivedFields <= 0 || !is_loaded()) {
1734     return;
1735   }
1736 
1737   log_info(cds, heap)("Verify all oops and pointers in loaded heap");
1738 
1739   ResourceMark rm;
1740   ResourceHashtable<uintptr_t, bool> table;
1741   VerifyLoadedHeapEmbeddedPointers verifier(&table);
1742   HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
1743   HeapWord* top    = (HeapWord*)_loaded_heap_top;
1744 
1745   for (HeapWord* p = bottom; p < top; ) {
1746     oop o = cast_to_oop(p);
1747     table.put(cast_from_oop<uintptr_t>(o), true);
1748     p += o->size();
1749   }
1750 
1751   for (HeapWord* p = bottom; p < top; ) {
1752     oop o = cast_to_oop(p);
1753     o->oop_iterate(&verifier);
1754     p += o->size();
1755   }
1756 }
1757 
1758 void HeapShared::fill_failed_loaded_region() {
1759   assert(_loading_failed, "must be");
1760   if (_loaded_heap_bottom != 0) {
1761     assert(_loaded_heap_top != 0, "must be");
1762     HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
1763     HeapWord* top = (HeapWord*)_loaded_heap_top;
1764     Universe::heap()->fill_with_objects(bottom, top - bottom);
1765   }
1766 }
1767 
1768 #endif // INCLUDE_CDS_JAVA_HEAP
--- EOF ---