/*
 * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(NULL), offset(0), type(T_ILLEGAL) {}

  bool valid() {
    return klass_name != NULL;
  }
};
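// Note: 'klass', 'offset' and 'type' start out unset (NULL/0/T_ILLEGAL) and are
// resolved at dump time before the tables below are walked; the code that fills
// them in is outside this excerpt.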

bool HeapShared::_disable_writing = false;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
GrowableArrayCHeap<Metadata**, mtClassShared>* HeapShared::_native_pointers = NULL;

#ifndef PRODUCT
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = NULL;
static const char* _test_class_name = NULL;
static const Klass* _test_class = NULL;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = NULL;
#endif


//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",              "archivedCache"},
  {"java/lang/Long$LongCache",                    "archivedCache"},
  {"java/lang/Byte$ByteCache",                    "archivedCache"},
  {"java/lang/Short$ShortCache",                  "archivedCache"},
  {"java/lang/Character$CharacterCache",          "archivedCache"},
  {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
  {NULL, NULL},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
  {"java/util/ImmutableCollections",              "archivedObjects"},
  {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
  {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
#ifndef PRODUCT
  {NULL, NULL}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {NULL, NULL},
};

// Entry fields for subgraphs archived in the open archive heap region (full module graph).
static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
  {"jdk/internal/module/ArchivedBootLayer",       "archivedBootLayer"},
  {"java/lang/Module$ArchivedData",               "archivedData"},
  {NULL, NULL},
};

GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL;
OopHandle HeapShared::_roots;

#ifdef ASSERT
bool HeapShared::is_archived_object_during_dumptime(oop p) {
  assert(HeapShared::can_write(), "must be");
  assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
  return Universe::heap()->is_archived_object(p);
}
#endif

static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
      return true;
    }
  }
  return false;
}

bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  return is_subgraph_root_class_of(closed_archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(open_archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(fmg_open_archive_subgraph_entry_fields, ik);
}

unsigned HeapShared::oop_hash(oop const& p) {
  // Do not call p->identity_hash() as that will update the
  // object header.
  return primitive_hash(cast_from_oop<intptr_t>(p));
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != NULL) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != NULL) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}

void HeapShared::reset_archived_object_states(TRAPS) {
  assert(DumpSharedSpaces, "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), CHECK);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), CHECK);

  // Clean up jdk.internal.loader.ClassLoaders::bootLoader(), which is not
  // directly used for class loading, but rather is used by the core library
  // to keep track of resources, etc., loaded by the null class loader.
  //
  // Note: this object is non-null, and is not the same as
  // ClassLoaderData::the_null_class_loader_data()->class_loader(),
  // which is null.
  log_debug(cds)("Resetting boot loader");
  JavaValue result(T_OBJECT);
  JavaCalls::call_static(&result,
                         vmClasses::jdk_internal_loader_ClassLoaders_klass(),
                         vmSymbols::bootLoader_name(),
                         vmSymbols::void_BuiltinClassLoader_signature(),
                         CHECK);
  Handle boot_loader(THREAD, result.get_oop());
  reset_states(boot_loader(), CHECK);
}
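
// A sketch (not the authoritative declaration) of what the core-library side
// is expected to provide for reset_states() above: each participating loader
// class may declare a private, no-arg method, e.g.
//
//     // in jdk.internal.loader.BuiltinClassLoader (assumed shape)
//     private void resetArchivedStates() {
//         // drop any dump-time-only state that must not go into the archive
//     }
//
// reset_states() walks the superclass chain, so every class in the hierarchy
// can contribute its own resetArchivedStates().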

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
HeapShared::OriginalObjectTable* HeapShared::_original_object_table = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  CachedOopInfo* p = cache->get(obj);
  if (p != NULL) {
    return p->_obj;
  } else {
    return NULL;
  }
}

int HeapShared::append_root(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == NULL) {
    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  }

  return _pending_roots->append(obj);
}

// Returns the objArray that contains all the roots of the archived objects.
objArrayOop HeapShared::roots() {
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!HeapShared::can_write()) {
      return NULL;
    }
  } else {
    assert(UseSharedSpaces, "must be");
  }

  objArrayOop roots = (objArrayOop)_roots.resolve();
  assert(roots != NULL, "should have been initialized");
  return roots;
}

// Returns the archived root at the given index. If 'clear' is true, the root
// slot is cleared after the value has been read.
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    assert(_pending_roots != NULL, "sanity");
    return _pending_roots->at(index);
  } else {
    assert(UseSharedSpaces, "must be");
    assert(!_roots.is_empty(), "must have loaded shared heap");
    oop result = roots()->obj_at(index);
    if (clear) {
      clear_root(index);
    }
    return result;
  }
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(UseSharedSpaces, "must be");
  if (ArchiveHeapLoader::is_fully_available()) {
    if (log_is_enabled(Debug, cds, heap)) {
      oop old = roots()->obj_at(index);
      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    roots()->obj_at_put(index, NULL);
  }
}

oop HeapShared::archive_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
    if (!(EnableValhalla && obj->mark().is_inline_type())) {
      int hash_original = obj->identity_hash();
      archived_oop->set_mark(archived_oop->klass()->prototype_header().copy_set_hash(hash_original));
      assert(archived_oop->mark().is_unlocked(), "sanity");

      DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
      assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);
    }

    ArchivedObjectCache* cache = archived_object_cache();
    CachedOopInfo info = make_cached_oop_info(archived_oop);
    cache->put(obj, info);
    if (_original_object_table != NULL) {
      _original_object_table->put(archived_oop, obj);
    }
    mark_native_pointers(obj, archived_oop);
    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
    }
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    log_error(cds)("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
        SIZE_FORMAT "M", MaxHeapSize/M);
    os::_exit(-1);
  }
  return archived_oop;
}

void HeapShared::archive_klass_objects() {
  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = ArchiveBuilder::get_buffered_klass(klasses->at(i));

    // archive mirror object
    java_lang_Class::archive_mirror(k);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references();
    }
  }
}

void HeapShared::mark_native_pointers(oop orig_obj, oop archived_obj) {
  if (java_lang_Class::is_instance(orig_obj)) {
    mark_one_native_pointer(archived_obj, java_lang_Class::klass_offset());
    mark_one_native_pointer(archived_obj, java_lang_Class::array_klass_offset());
  }
}

void HeapShared::mark_one_native_pointer(oop archived_obj, int offset) {
  Metadata* ptr = archived_obj->metadata_field_acquire(offset);
  if (ptr != NULL) {
    // Set the native pointer to the requested address (at runtime, if the metadata
    // is mapped at the default location, it will be at this address).
    address buffer_addr = ArchiveBuilder::current()->get_buffered_addr((address)ptr);
    address requested_addr = ArchiveBuilder::current()->to_requested(buffer_addr);
    archived_obj->metadata_field_put(offset, (Metadata*)requested_addr);

    // Remember this pointer. At runtime, if the metadata is mapped at a non-default
    // location, the pointer needs to be patched (see ArchiveHeapLoader::patch_native_pointers()).
    _native_pointers->append(archived_obj->field_addr<Metadata*>(offset));

    log_debug(cds, heap, mirror)(
        "Marked metadata field at %d: " PTR_FORMAT " ==> " PTR_FORMAT,
         offset, p2i(ptr), p2i(requested_addr));
  }
}
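
// A worked example of the scheme above (a sketch with made-up addresses): if a
// mirror's klass field points to metadata copied to buffer address B, the
// archived copy stores R = to_requested(B). When the archive is mapped at the
// requested base, R is already valid; otherwise
// ArchiveHeapLoader::patch_native_pointers() revisits every address recorded
// in _native_pointers and adjusts it (presumably by the actual mapping delta).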

// -- Handling of Enum objects
// Java Enum classes have synthetic <clinit> methods that look like this:
//     enum MyEnum {FOO, BAR}
//     MyEnum::<clinit> {
//        /*static final MyEnum*/ MyEnum::FOO = new MyEnum("FOO");
//        /*static final MyEnum*/ MyEnum::BAR = new MyEnum("BAR");
//     }
//
// If the MyEnum::FOO object is referenced by any of the archived subgraphs, we must
// ensure that the archived value is the same object (by address) as the runtime
// value of MyEnum::FOO.
//
// However, since MyEnum::<clinit> is synthetically generated by javac, there's
// no way of programmatically handling this inside the Java code (as you would handle
// ModuleLayer::EMPTY_LAYER, for example).
//
// Instead, we archive all static fields of such Enum classes. At runtime,
// HeapShared::initialize_enum_klass() will skip the <clinit> method and pull
// the static fields out of the archived heap.
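//
// A sketch of the net effect (with hypothetical root indices): at runtime,
// instead of executing MyEnum::<clinit>, initialize_enum_klass() performs the
// moral equivalent of
//     MyEnum.FOO = (MyEnum)roots().obj_at(root_index_of_FOO);
//     MyEnum.BAR = (MyEnum)roots().obj_at(root_index_of_BAR);
// so the static fields end up referring to the archived instances directly.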
void HeapShared::check_enum_obj(int level,
                                KlassSubGraphInfo* subgraph_info,
                                oop orig_obj,
                                bool is_closed_archive) {
  Klass* k = orig_obj->klass();
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
  if (!k->is_instance_klass()) {
    return;
  }
  InstanceKlass* ik = InstanceKlass::cast(k);
  if (ik->java_super() == vmClasses::Enum_klass() && !ik->has_archived_enum_objs()) {
    ResourceMark rm;
    ik->set_has_archived_enum_objs();
    buffered_k->set_has_archived_enum_objs();
    oop mirror = ik->java_mirror();

    for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
      if (fs.access_flags().is_static()) {
        fieldDescriptor& fd = fs.field_descriptor();
        if (fd.field_type() != T_OBJECT && fd.field_type() != T_ARRAY) {
          guarantee(false, "static field %s::%s must be T_OBJECT or T_ARRAY",
                    ik->external_name(), fd.name()->as_C_string());
        }
        oop oop_field = mirror->obj_field(fd.offset());
        if (oop_field == NULL) {
          guarantee(false, "static field %s::%s must not be null",
                    ik->external_name(), fd.name()->as_C_string());
        } else if (oop_field->klass() != ik && oop_field->klass() != ik->array_klass_or_null()) {
          guarantee(false, "static field %s::%s is of the wrong type",
                    ik->external_name(), fd.name()->as_C_string());
        }
        oop archived_oop_field = archive_reachable_objects_from(level, subgraph_info, oop_field, is_closed_archive);
        int root_index = append_root(archived_oop_field);
        log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT " -> " INTPTR_FORMAT ")",
                            root_index, ik->external_name(), fd.name()->as_C_string(),
                            p2i((oopDesc*)oop_field), p2i((oopDesc*)archived_oop_field));
        SystemDictionaryShared::add_enum_klass_static_field(ik, root_index);
      }
    }
  }
}

// See comments in HeapShared::check_enum_obj()
bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) {
  if (!ArchiveHeapLoader::is_fully_available()) {
    return false;
  }

  RunTimeClassInfo* info = RunTimeClassInfo::get_for(k);
  assert(info != NULL, "sanity");

  if (log_is_enabled(Info, cds, heap)) {
    ResourceMark rm;
    log_info(cds, heap)("Initializing Enum class: %s", k->external_name());
  }

  oop mirror = k->java_mirror();
  int i = 0;
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) {
      int root_index = info->enum_klass_static_field_root_index_at(i++);
      fieldDescriptor& fd = fs.field_descriptor();
      assert(fd.field_type() == T_OBJECT || fd.field_type() == T_ARRAY, "must be");
      mirror->obj_field_put(fd.offset(), get_root(root_index, /*clear=*/true));
    }
  }
  return true;
}
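
// Note: the JavaFieldStream order above must match the order in which
// check_enum_obj() recorded the root indices at dump time; both sides rely on
// iterating the static fields of the class in the same sequence.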

void HeapShared::run_full_gc_in_vm_thread() {
  if (HeapShared::can_write()) {
    // Avoid fragmentation while archiving heap objects.
    // We do this inside a safepoint, so that no further allocation can happen after GC
    // has finished.
    if (GCLocker::is_active()) {
      // Just checking for safety ...
      // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
      // has been modified such that JNI code is executed in some cleanup threads after
      // we have finished class loading.
      log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
    } else {
      log_info(cds)("Run GC ...");
      Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
      log_info(cds)("Run GC done");
    }
  }
}

void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
                                 GrowableArray<MemRegion>* open_regions) {

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache(log_is_enabled(Info, cds, map));

    log_info(cds)("Heap range = [" PTR_FORMAT " - "  PTR_FORMAT "]",
                   UseCompressedOops ? p2i(CompressedOops::begin()) :
                                       p2i((address)G1CollectedHeap::heap()->reserved().start()),
                   UseCompressedOops ? p2i(CompressedOops::end()) :
                                       p2i((address)G1CollectedHeap::heap()->reserved().end()));
    log_info(cds)("Dumping objects to closed archive heap region ...");
    copy_closed_objects(closed_regions);

    log_info(cds)("Dumping objects to open archive heap region ...");
    copy_open_objects(open_regions);

    CDSHeapVerifier::verify();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) {
  assert(HeapShared::can_write(), "must be");

  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive(_dumped_interned_strings);

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */,
                           false /* is_full_module_graph */);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_regions,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) {
  assert(HeapShared::can_write(), "must be");

  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors();

  archive_klass_objects();

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           false /* is_full_module_graph */);
  if (MetaspaceShared::use_full_module_graph()) {
    archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
                             false /* is_closed_archive */,
                             true /* is_full_module_graph */);
    ClassLoaderDataShared::init_archived_oops();
  }

  copy_roots();

  G1CollectedHeap::heap()->end_archive_alloc_range(open_regions,
                                                   os::vm_allocation_granularity());
}

// Copy _pending_roots into an objArray
void HeapShared::copy_roots() {
  // HeapShared::roots() points into an ObjArray in the open archive region. A portion of the
  // objects in this array are discovered during HeapShared::archive_objects(). For example,
  // in HeapShared::archive_reachable_objects_from() -> HeapShared::check_enum_obj().
  // However, HeapShared::archive_objects() happens inside a safepoint, so we can't
  // allocate a "regular" ObjArray and pass the result to HeapShared::archive_object().
  // Instead, we have to roll our own alloc/copy routine here.
  int length = _pending_roots != NULL ? _pending_roots->length() : 0;
  size_t size = objArrayOopDesc::object_size(length);
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);

  memset(mem, 0, size * BytesPerWord);
  {
    // This is copied from MemAllocator::finish
    oopDesc::set_mark(mem, k->prototype_header());
    oopDesc::release_set_klass(mem, k);
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    roots()->obj_at_put(i, _pending_roots->at(i));
  }
  log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem);
}
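
// After copy_roots() returns, the roots array is fixed-length and lives in the
// open archive region; calling append_root() past this point would record a
// root that never makes it into the archive. (This invariant is implied by the
// code above rather than enforced with an assert.)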

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the "buffered"
// address of the class.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
  assert(DumpSharedSpaces, "dump time only");
  bool created;
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
  KlassSubGraphInfo* info =
    _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(buffered_k, is_full_module_graph),
                                                  &created);
  assert(created, "must not initialize twice");
  return info;
}

KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
  assert(info != NULL, "must have been initialized");
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(
      int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
}
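
// _subgraph_entry_fields is thus a flat array of (static_field_offset,
// root_index) pairs. The archived copy is consumed in the same pairwise layout
// at runtime by init_archived_fields_for() and clear_archived_roots_of().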

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in a sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_k), "must be a shared class");

  if (_k == buffered_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (buffered_k->is_instance_klass()) {
    assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class(),
          "must be boot class");
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
    check_allowed_klass(InstanceKlass::cast(orig_k));
  } else if (buffered_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(buffered_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
            "must be boot class");
      check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
    }
    if (buffered_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(buffered_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(buffered_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(buffered_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}

void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
  if (ik->module()->name() == vmSymbols::java_base()) {
    assert(ik->package() != NULL, "classes in java.base cannot be in unnamed package");
    return;
  }

#ifndef PRODUCT
  if (!ik->module()->is_named() && ik->package() == NULL) {
    // This class is loaded by ArchiveHeapTestClass
    return;
  }
  const char* extra_msg = ", or in an unnamed package of an unnamed module";
#else
  const char* extra_msg = "";
#endif

  ResourceMark rm;
  log_error(cds, heap)("Class %s not allowed in archive heap. Must be in java.base%s",
                       ik->external_name(), extra_msg);
  os::_exit(1);
}

bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
  if (k->is_objArray_klass()) {
    k = ObjArrayKlass::cast(k)->bottom_klass();
  }
  if (k->is_instance_klass()) {
    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
      ResourceMark rm;
      log_info(cds, heap)("non-early: %s", k->external_name());
      return true;
    } else {
      return false;
    }
  } else {
    return false;
  }
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;
  _is_full_module_graph = info->is_full_module_graph();

  if (_is_full_module_graph) {
    // Consider all classes referenced by the full module graph as early -- we will be
    // allocating objects of these classes during JVMTI early phase, so they cannot
    // be processed by (non-early) JVMTI ClassFileLoadHook
    _has_non_early_klasses = false;
  } else {
    _has_non_early_klasses = info->has_non_early_klasses();
  }

  if (_has_non_early_klasses) {
    ResourceMark rm;
    log_info(cds, heap)(
          "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
          _k->external_name());
  }

  // populate the entry fields
  GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      ArchiveBuilder::new_ro_array<int>(num_entry_fields);
    for (int i = 0; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
    }
  }

  ArchivePtrMarker::mark_pointer(&_k);
  ArchivePtrMarker::mark_pointer(&_entry_field_records);
  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
      u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};
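
// At runtime, the table written below is probed with the same hash, computed
// from the shared Klass* (see resolve_or_init_classes_for_subgraph_of() and
// clear_archived_roots_of()).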

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before the archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);
  writer.dump(&_run_time_subgraph_info_table, "subgraphs");

#ifndef PRODUCT
  if (ArchiveHeapTestClass != NULL) {
    size_t len = strlen(ArchiveHeapTestClass) + 1;
    Array<char>* array = ArchiveBuilder::new_ro_array<char>((int)len);
    strncpy(array->adr_at(0), ArchiveHeapTestClass, len);
    _archived_ArchiveHeapTestClass = array;
  }
#endif
}

void HeapShared::serialize(SerializeClosure* soc) {
  oop roots_oop = NULL;

  if (soc->reading()) {
    soc->do_oop(&roots_oop); // read from archive
    assert(oopDesc::is_oop_or_null(roots_oop), "is oop");
    // Create an OopHandle only if we have actually mapped or loaded the roots
    if (roots_oop != NULL) {
      assert(ArchiveHeapLoader::is_fully_available(), "must be");
      _roots = OopHandle(Universe::vm_global(), roots_oop);
    }
  } else {
    // writing
    roots_oop = roots();
    soc->do_oop(&roots_oop); // write to archive
  }

#ifndef PRODUCT
  soc->do_ptr((void**)&_archived_ArchiveHeapTestClass);
  if (soc->reading() && _archived_ArchiveHeapTestClass != NULL) {
    _test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
    setup_test_class(_test_class_name);
  }
#endif

  _run_time_subgraph_info_table.serialize_header(soc);
}

static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 forces a GC to happen at such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC,  true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(UseSharedSpaces, "runtime only!");
  if (!ArchiveHeapLoader::is_fully_available()) {
    return; // nothing to do
  }
  resolve_classes_for_subgraphs(current, closed_archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, open_archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_open_archive_subgraph_entry_fields);
}

void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != NULL && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}

void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == NULL) {
    clear_archived_roots_of(k);
  }
}

void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  if (!ArchiveHeapLoader::is_fully_available()) {
    return; // nothing to do
  }

  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);

  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // None of the field values will be set if there was an exception when initializing the classes.
    // The java code will not see any of the archived objects in the
    // subgraphs referenced from k in this case.
    return;
  }

  if (record != NULL) {
    init_archived_fields_for(k, record);
  }
}

const ArchivedKlassSubGraphInfoRecord*
HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  if (!k->is_shared()) {
    return NULL;
  }
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

#ifndef PRODUCT
  if (_test_class_name != NULL && k->name()->equals(_test_class_name) && record != NULL) {
    _test_class = k;
    _test_class_record = record;
  }
#endif

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
                            k->external_name());
      }
      return NULL;
    }

    if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
                            k->external_name());
      }
      return NULL;
    }

    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
    }

    resolve_or_init(k, do_init, CHECK_NULL);

    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* klass = klasses->at(i);
        if (!klass->is_shared()) {
          return NULL;
        }
        resolve_or_init(klass, do_init, CHECK_NULL);
      }
    }
  }

  return record;
}

void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    if (k->class_loader_data() == NULL) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}

void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
  verify_the_heap(k, "before");

  // Load the subgraph entry fields from the record and store them back to
  // the corresponding fields within the mirror.
  oop m = k->java_mirror();
  Array<int>* entry_field_records = record->entry_field_records();
  if (entry_field_records != NULL) {
    int efr_len = entry_field_records->length();
    assert(efr_len % 2 == 0, "sanity");
    for (int i = 0; i < efr_len; i += 2) {
      int field_offset = entry_field_records->at(i);
      int root_index = entry_field_records->at(i+1);
      oop v = get_root(root_index, /*clear=*/true);
      m->obj_field_put(field_offset, v);
      log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
    }

    // Done. Java code can see the archived sub-graphs referenced from k's
    // mirror after this point.
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s",
                          k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "");
    }
  }

  verify_the_heap(k, "after ");
}

void HeapShared::clear_archived_roots_of(Klass* k) {
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
  if (record != NULL) {
    Array<int>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 2 == 0, "sanity");
      for (int i = 0; i < efr_len; i += 2) {
        int root_index = entry_field_records->at(i+1);
        clear_root(root_index);
      }
    }
  }
}

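// Dump-time closure that walks the reference fields of one original object.
// Live instances form an implicit stack via _current/_last, so that
// make_cached_oop_info() can record which object (the "referrer") and which
// subgraph were being scanned when a new object is first discovered.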
1081 class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
1082   int _level;
1083   bool _is_closed_archive;
1084   bool _record_klasses_only;
1085   KlassSubGraphInfo* _subgraph_info;
1086   oop _orig_referencing_obj;
1087   oop _archived_referencing_obj;
1088 
1089   // The following are for maintaining a stack for determining
1090   // CachedOopInfo::_referrer
1091   static WalkOopAndArchiveClosure* _current;
1092   WalkOopAndArchiveClosure* _last;
1093  public:
1094   WalkOopAndArchiveClosure(int level,
1095                            bool is_closed_archive,
1096                            bool record_klasses_only,
1097                            KlassSubGraphInfo* subgraph_info,
1098                            oop orig, oop archived) :
1099     _level(level), _is_closed_archive(is_closed_archive),
1100     _record_klasses_only(record_klasses_only),
1101     _subgraph_info(subgraph_info),
1102     _orig_referencing_obj(orig), _archived_referencing_obj(archived) {
1103     _last = _current;
1104     _current = this;
1105   }
1106   ~WalkOopAndArchiveClosure() {
1107     _current = _last;
1108   }
1109   void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
1110   void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
1111 
1112  protected:
1113   template <class T> void do_oop_work(T *p) {
1114     oop obj = RawAccess<>::oop_load(p);
1115     if (!CompressedOops::is_null(obj)) {
1116       assert(!HeapShared::is_archived_object_during_dumptime(obj),
1117              "original objects must not point to archived objects");
1118 
1119       size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
1120       T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);
1121 
1122       if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
1123         ResourceMark rm;
1124         log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level,
1125                              _orig_referencing_obj->klass()->external_name(), field_delta,
1126                              p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
1127         LogTarget(Trace, cds, heap) log;
1128         LogStream out(log);
1129         obj->print_on(&out);
1130       }
1131 
1132       oop archived = HeapShared::archive_reachable_objects_from(
1133           _level + 1, _subgraph_info, obj, _is_closed_archive);
1134       assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
1135       assert(HeapShared::is_archived_object_during_dumptime(archived), "must be");
1136 
1137       if (!_record_klasses_only) {
1138         // Update the reference in the archived copy of the referencing object.
1139         log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
1140                              _level, p2i(new_p), p2i(obj), p2i(archived));
1141         RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
1142       }
1143     }
1144   }
1145 
1146  public:
1147   static WalkOopAndArchiveClosure* current()  { return _current;              }
1148   oop orig_referencing_obj()                  { return _orig_referencing_obj; }
1149   KlassSubGraphInfo* subgraph_info()          { return _subgraph_info;        }
1150 };
1151 
1152 WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = NULL;
1153 
1154 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop orig_obj) {
1155   CachedOopInfo info;
1156   WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
1157 
1158   info._subgraph_info = (walker == NULL) ? NULL : walker->subgraph_info();
1159   info._referrer = (walker == NULL) ? NULL : walker->orig_referencing_obj();
1160   info._obj = orig_obj;
1161 
1162   return info;
1163 }
1164 
1165 void HeapShared::check_closed_region_object(InstanceKlass* k) {
1166   // Check fields in the object
1167   for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
1168     if (!fs.access_flags().is_static()) {
1169       BasicType ft = fs.field_descriptor().field_type();
1170       if (!fs.access_flags().is_final() && is_reference_type(ft)) {
1171         ResourceMark rm;
1172         log_warning(cds, heap)(
1173           "Please check reference field in %s instance in closed archive heap region: %s %s",
1174           k->external_name(), (fs.name())->as_C_string(),
1175           (fs.signature())->as_C_string());
1176       }
1177     }
1178   }
1179 }
1180 
1181 void HeapShared::check_module_oop(oop orig_module_obj) {
1182   assert(DumpSharedSpaces, "must be");
1183   assert(java_lang_Module::is_instance(orig_module_obj), "must be");
1184   ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj);
1185   if (orig_module_ent == NULL) {
1186     // These special Module objects are created in Java code. They are not
1187     // defined via Modules::define_module(), so they don't have a ModuleEntry:
1188     //     java.lang.Module::ALL_UNNAMED_MODULE
1189     //     java.lang.Module::EVERYONE_MODULE
1190     //     jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule
1191     assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed");
1192     log_info(cds, heap)("Module oop with No ModuleEntry* @[" PTR_FORMAT "]", p2i(orig_module_obj));
1193   } else {
1194     ClassLoaderData* loader_data = orig_module_ent->loader_data();
1195     assert(loader_data->is_builtin_class_loader_data(), "must be");
1196   }
1197 }
1198 
1199 
1200 // (1) If orig_obj has not been archived yet, archive it.
1201 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
1202 //     trace all  objects that are reachable from it, and make sure these objects are archived.
1203 // (3) Record the klasses of all orig_obj and all reachable objects.
1204 oop HeapShared::archive_reachable_objects_from(int level,
1205                                                KlassSubGraphInfo* subgraph_info,
1206                                                oop orig_obj,
1207                                                bool is_closed_archive) {
1208   assert(orig_obj != NULL, "must be");
1209   assert(!is_archived_object_during_dumptime(orig_obj), "sanity");
1210 
1211   if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
1212     // This object has injected fields that cannot be supported easily, so we disallow them for now.
1213     // If you get an error here, you probably made a change in the JDK library that has added
1214     // these objects that are referenced (directly or indirectly) by static fields.
1215     ResourceMark rm;
1216     log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
1217     os::_exit(1);
1218   }
1219 
1220   // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
1221   // them as Klass::_archived_mirror because they need to be specially restored at run time.
1222   //
1223   // If you get an error here, you probably made a change in the JDK library that has added a Class
1224   // object that is referenced (directly or indirectly) by static fields.
1225   if (java_lang_Class::is_instance(orig_obj)) {
1226     log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
1227     os::_exit(1);
1228   }
1229 
1230   oop archived_obj = find_archived_heap_object(orig_obj);
1231   if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
1232     // To save time, don't walk strings that are already archived. They just contain
1233     // pointers to a type array, whose klass doesn't need to be recorded.
1234     return archived_obj;
1235   }
1236 
1237   if (has_been_seen_during_subgraph_recording(orig_obj)) {
1238     // orig_obj has already been archived and traced. Nothing more to do.
1239     return archived_obj;
1240   } else {
1241     set_has_been_seen_during_subgraph_recording(orig_obj);
1242   }
1243 
1244   bool record_klasses_only = (archived_obj != NULL);
1245   if (archived_obj == NULL) {
1246     ++_num_new_archived_objs;
1247     archived_obj = archive_object(orig_obj);
1248     if (archived_obj == NULL) {
1249       // Skip archiving the sub-graph referenced from the current entry field.
1250       ResourceMark rm;
1251       log_error(cds, heap)(
1252         "Cannot archive the sub-graph referenced from %s object ("
1253         PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
1254         orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1255       if (level == 1) {
1256         // Don't archive a subgraph root that's too big. For archives static fields, that's OK
1257         // as the Java code will take care of initializing this field dynamically.
1258         return NULL;
1259       } else {
1260         // We don't know how to handle an object that has been archived, but some of its reachable
1261         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1262         // we have a real use case.
1263         os::_exit(1);
1264       }
1265     }
1266 
1267     if (java_lang_Module::is_instance(orig_obj)) {
1268       check_module_oop(orig_obj);
1269       java_lang_Module::set_module_entry(archived_obj, NULL);
1270       java_lang_Module::set_loader(archived_obj, NULL);
1271     } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
1272       // class_data will be restored explicitly at run time.
1273       guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
1274                 orig_obj == SystemDictionary::java_system_loader() ||
1275                 java_lang_ClassLoader::loader_data(orig_obj) == NULL, "must be");
1276       java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
1277     }
1278   }
1279 
1280   assert(archived_obj != NULL, "must be");
1281   Klass *orig_k = orig_obj->klass();
1282   subgraph_info->add_subgraph_object_klass(orig_k);
1283 
1284   WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
1285                                   subgraph_info, orig_obj, archived_obj);
1286   orig_obj->oop_iterate(&walker);
1287   if (is_closed_archive && orig_k->is_instance_klass()) {
1288     check_closed_region_object(InstanceKlass::cast(orig_k));
1289   }
1290 
1291   check_enum_obj(level + 1, subgraph_info, orig_obj, is_closed_archive);
1292   return archived_obj;
1293 }
1294 
1295 //
1296 // Start from the given static field in a java mirror and archive the
1297 // complete sub-graph of java heap objects that are reached directly
1298 // or indirectly from the starting object by following references.
1299 // Sub-graph archiving restrictions (current):
1300 //
1301 // - All classes of objects in the archived sub-graph (including the
1302 //   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. A mirror can only be the sub-graph entry object.
1305 //
1306 // The Java heap object sub-graph archiving process (see
1307 // WalkOopAndArchiveClosure):
1308 //
1309 // 1) Java object sub-graph archiving starts from a given static field
1310 // within a Class instance (java mirror). If the static field is a
1311 // reference field and points to a non-null java object, proceed to
1312 // the next step.
1313 //
// 2) Archives the referenced java object. If an archived copy of the
// current object already exists, updates the pointer in the archived
// copy of the referencing object to point to the current archived object.
// Otherwise, proceeds to the next step.
//
// 3) Follows all references within the current java object and recursively
// archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
// point to the current archived object.
1324 //
1325 // 5) The Klass of the current java object is added to the list of Klasses
1326 // for loading and initializing before any object in the archived graph can
1327 // be accessed at runtime.
1328 //
1329 void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
1330                                                              const char* klass_name,
1331                                                              int field_offset,
1332                                                              const char* field_name,
1333                                                              bool is_closed_archive) {
1334   assert(DumpSharedSpaces, "dump time only");
1335   assert(k->is_shared_boot_class(), "must be boot class");
1336 
1337   oop m = k->java_mirror();
1338 
1339   KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
1340   oop f = m->obj_field(field_offset);
1341 
1342   log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));
1343 
1344   if (!CompressedOops::is_null(f)) {
1345     if (log_is_enabled(Trace, cds, heap)) {
1346       LogTarget(Trace, cds, heap) log;
1347       LogStream out(log);
1348       f->print_on(&out);
1349     }
1350 
1351     oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);
1352 
1353     if (af == NULL) {
1354       log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
1355                            klass_name, field_name);
1356     } else {
1357       // Note: the field value is not preserved in the archived mirror.
1358       // Record the field as a new subGraph entry point. The recorded
1359       // information is restored from the archive at runtime.
1360       subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
1361       log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
1362     }
1363   } else {
    // The field contains null; we still need to record the entry point,
    // so it can be restored at runtime.
1366     subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
1367   }
1368 }
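
// For example, suppose a (hypothetical) boot-class static field "archivedCache"
// points to an Object[] holding objA and objB. The walk above archives the array
// itself, recursively archives objA and objB, records the Klass of every archived
// object in subgraph_info, and finally records the field itself as an entry point:
//
//   subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);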
1369 
1370 #ifndef PRODUCT
1371 class VerifySharedOopClosure: public BasicOopIterateClosure {
1372  private:
1373   bool _is_archived;
1374 
1375  public:
1376   VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}
1377 
1378   void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
1379   void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }
1380 
1381  protected:
1382   template <class T> void do_oop_work(T *p) {
1383     oop obj = RawAccess<>::oop_load(p);
1384     if (!CompressedOops::is_null(obj)) {
1385       HeapShared::verify_reachable_objects_from(obj, _is_archived);
1386     }
1387   }
1388 };
1389 
1390 void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
1391   assert(DumpSharedSpaces, "dump time only");
1392   assert(k->is_shared_boot_class(), "must be boot class");
1393 
1394   oop m = k->java_mirror();
1395   oop f = m->obj_field(field_offset);
1396   if (!CompressedOops::is_null(f)) {
1397     verify_subgraph_from(f);
1398   }
1399 }
1400 
1401 void HeapShared::verify_subgraph_from(oop orig_obj) {
1402   oop archived_obj = find_archived_heap_object(orig_obj);
1403   if (archived_obj == NULL) {
    // It's OK for the root of a subgraph not to be archived. See comments in
    // archive_reachable_objects_from().
1406     return;
1407   }
1408 
1409   // Verify that all objects reachable from orig_obj are archived.
1410   init_seen_objects_table();
1411   verify_reachable_objects_from(orig_obj, false);
1412   delete_seen_objects_table();
1413 
  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
1421 }
1422 
1423 void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
1424   _num_total_verifications ++;
1425   if (!has_been_seen_during_subgraph_recording(obj)) {
1426     set_has_been_seen_during_subgraph_recording(obj);
1427 
1428     if (is_archived) {
1429       assert(is_archived_object_during_dumptime(obj), "must be");
1430       assert(find_archived_heap_object(obj) == NULL, "must be");
1431     } else {
1432       assert(!is_archived_object_during_dumptime(obj), "must be");
1433       assert(find_archived_heap_object(obj) != NULL, "must be");
1434     }
1435 
1436     VerifySharedOopClosure walker(is_archived);
1437     obj->oop_iterate(&walker);
1438   }
1439 }
1440 #endif
1441 
1442 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
1443 int HeapShared::_num_new_walked_objs;
1444 int HeapShared::_num_new_archived_objs;
1445 int HeapShared::_num_old_recorded_klasses;
1446 
1447 int HeapShared::_num_total_subgraph_recordings = 0;
1448 int HeapShared::_num_total_walked_objs = 0;
1449 int HeapShared::_num_total_archived_objs = 0;
1450 int HeapShared::_num_total_recorded_klasses = 0;
1451 int HeapShared::_num_total_verifications = 0;
1452 
1453 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
1454   return _seen_objects_table->get(obj) != NULL;
1455 }
1456 
1457 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
1458   assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
1459   _seen_objects_table->put(obj, true);
1460   ++ _num_new_walked_objs;
1461 }
1462 
1463 void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
1464   log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
1465   init_subgraph_info(k, is_full_module_graph);
1466   init_seen_objects_table();
1467   _num_new_walked_objs = 0;
1468   _num_new_archived_objs = 0;
1469   _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
1470 }
1471 
1472 void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
1473   int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
1474     _num_old_recorded_klasses;
1475   log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
1476                       "walked %d objs, archived %d new objs, recorded %d classes",
1477                       class_name, _num_new_walked_objs, _num_new_archived_objs,
1478                       num_new_recorded_klasses);
1479 
1480   delete_seen_objects_table();
1481 
1482   _num_total_subgraph_recordings ++;
1483   _num_total_walked_objs      += _num_new_walked_objs;
1484   _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
1486 }
1487 
1488 class ArchivableStaticFieldFinder: public FieldClosure {
1489   InstanceKlass* _ik;
1490   Symbol* _field_name;
1491   bool _found;
1492   int _offset;
1493 public:
1494   ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
1495     _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}
1496 
1497   virtual void do_field(fieldDescriptor* fd) {
1498     if (fd->name() == _field_name) {
1499       assert(!_found, "fields can never be overloaded");
1500       if (is_reference_type(fd->field_type())) {
1501         _found = true;
1502         _offset = fd->offset();
1503       }
1504     }
1505   }
1506   bool found()     { return _found;  }
1507   int offset()     { return _offset; }
1508 };
1509 
1510 void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
1511                                             TRAPS) {
1512   for (int i = 0; fields[i].valid(); i++) {
1513     ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);
1516     ResourceMark rm; // for stringStream::as_string() etc.
1517 
1518 #ifndef PRODUCT
1519     bool is_test_class = (ArchiveHeapTestClass != NULL) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
1520 #else
1521     bool is_test_class = false;
1522 #endif
1523 
1524     if (is_test_class) {
1525       log_warning(cds)("Loading ArchiveHeapTestClass %s ...", ArchiveHeapTestClass);
1526     }
1527 
1528     Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, THREAD);
1529     if (HAS_PENDING_EXCEPTION) {
1530       CLEAR_PENDING_EXCEPTION;
1531       stringStream st;
      st.print("Failed to initialize archive heap: %s cannot be loaded by the boot loader", info->klass_name);
1533       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1534     }
1535 
1536     if (!k->is_instance_klass()) {
1537       stringStream st;
      st.print("Failed to initialize archive heap: %s is not an instance class", info->klass_name);
1539       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1540     }
1541 
1542     InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(), "Only support boot classes");
1545 
1546     if (is_test_class) {
1547       if (ik->module()->is_named()) {
        // We don't want ArchiveHeapTestClass to be abused for easily loading/initializing
        // arbitrary core-lib classes; the test class must at least be appended to the
        // boot class path.
1550         stringStream st;
1551         st.print("ArchiveHeapTestClass %s is not in unnamed module", ArchiveHeapTestClass);
1552         THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1553       }
1554 
1555       if (ik->package() != NULL) {
1556         // This restriction makes HeapShared::is_a_test_class_in_unnamed_module() easy.
1557         stringStream st;
1558         st.print("ArchiveHeapTestClass %s is not in unnamed package", ArchiveHeapTestClass);
1559         THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1560       }
1561     } else {
1562       if (ik->module()->name() != vmSymbols::java_base()) {
1563         // We don't want to deal with cases when a module is unavailable at runtime.
1564         // FUTURE -- load from archived heap only when module graph has not changed
1565         //           between dump and runtime.
1566         stringStream st;
1567         st.print("%s is not in java.base module", info->klass_name);
1568         THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1569       }
1570     }
1571 
1572     if (is_test_class) {
1573       log_warning(cds)("Initializing ArchiveHeapTestClass %s ...", ArchiveHeapTestClass);
1574     }
1575     ik->initialize(CHECK);
1576 
1577     ArchivableStaticFieldFinder finder(ik, field_name);
1578     ik->do_local_static_fields(&finder);
1579     if (!finder.found()) {
1580       stringStream st;
1581       st.print("Unable to find the static T_OBJECT field %s::%s", info->klass_name, info->field_name);
1582       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1583     }
1584 
1585     info->klass = ik;
1586     info->offset = finder.offset();
1587   }
1588 }
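
// A minimal sketch of a fields[] argument (the names below are hypothetical; the
// real tables are defined earlier in this file). Each list must be terminated by
// an entry whose klass_name is NULL, so that valid() ends the loop above:
//
//   static ArchivableStaticFieldInfo example_fields[] = {
//     {"java/lang/Example", "archivedCache"},
//     {NULL, NULL},
//   };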
1589 
1590 void HeapShared::init_subgraph_entry_fields(TRAPS) {
1591   assert(HeapShared::can_write(), "must be");
1592   _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();
1593   init_subgraph_entry_fields(closed_archive_subgraph_entry_fields, CHECK);
1594   init_subgraph_entry_fields(open_archive_subgraph_entry_fields, CHECK);
1595   if (MetaspaceShared::use_full_module_graph()) {
1596     init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields, CHECK);
1597   }
1598 }
1599 
1600 #ifndef PRODUCT
1601 void HeapShared::setup_test_class(const char* test_class_name) {
1602   ArchivableStaticFieldInfo* p = open_archive_subgraph_entry_fields;
1603   int num_slots = sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
1604   assert(p[num_slots - 2].klass_name == NULL, "must have empty slot that's patched below");
1605   assert(p[num_slots - 1].klass_name == NULL, "must have empty slot that marks the end of the list");
1606 
1607   if (test_class_name != NULL) {
1608     p[num_slots - 2].klass_name = test_class_name;
1609     p[num_slots - 2].field_name = ARCHIVE_TEST_FIELD_NAME;
1610   }
1611 }
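
// A usage sketch: ArchiveHeapTestClass is a non-product flag, so something like the
// following works only in debug builds (the class name and extra flags are
// illustrative):
//
//   java -Xshare:dump -XX:ArchiveHeapTestClass=MyHeapTest \
//        -Xbootclasspath/a:test-classes ...
//
// This patches {MyHeapTest, ARCHIVE_TEST_FIELD_NAME} into the second-to-last slot
// of open_archive_subgraph_entry_fields; the last slot stays NULL as the terminator.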
1612 
// See if ik is one of the test classes that are pulled in by -XX:ArchiveHeapTestClass.
// This may be called at runtime before the module system is initialized, so we
// cannot rely on InstanceKlass::module(), etc.
1616 bool HeapShared::is_a_test_class_in_unnamed_module(Klass* ik) {
1617   if (_test_class != NULL) {
1618     if (ik == _test_class) {
1619       return true;
1620     }
1621     Array<Klass*>* klasses = _test_class_record->subgraph_object_klasses();
1622     if (klasses == NULL) {
1623       return false;
1624     }
1625 
1626     for (int i = 0; i < klasses->length(); i++) {
1627       Klass* k = klasses->at(i);
1628       if (k == ik) {
1629         Symbol* name;
1630         if (k->is_instance_klass()) {
1631           name = InstanceKlass::cast(k)->name();
1632         } else if (k->is_objArray_klass()) {
1633           Klass* bk = ObjArrayKlass::cast(k)->bottom_klass();
1634           if (!bk->is_instance_klass()) {
1635             return false;
1636           }
1637           name = bk->name();
1638         } else {
1639           return false;
1640         }
1641 
1642         // See KlassSubGraphInfo::check_allowed_klass() - only two types of
1643         // classes are allowed:
1644         //   (A) java.base classes (which must not be in the unnamed module)
1645         //   (B) test classes which must be in the unnamed package of the unnamed module.
1646         // So if we see a '/' character in the class name, it must be in (A);
1647         // otherwise it must be in (B).
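        // For example, "java/lang/String" (a java.base class) contains '/', so it
        // falls under (A); a test class name like "MyHeapTest" has no '/', so it
        // falls under (B).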
        if (name->index_of_at(0, "/", 1) >= 0) {
1649           return false; // (A)
1650         }
1651 
1652         return true; // (B)
1653       }
1654     }
1655   }
1656 
1657   return false;
1658 }
1659 #endif
1660 
1661 void HeapShared::init_for_dumping(TRAPS) {
1662   if (HeapShared::can_write()) {
1663     setup_test_class(ArchiveHeapTestClass);
1664     _dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
1665     _native_pointers = new GrowableArrayCHeap<Metadata**, mtClassShared>(2048);
1666     init_subgraph_entry_fields(CHECK);
1667   }
1668 }
1669 
1670 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
1671                                           bool is_closed_archive,
1672                                           bool is_full_module_graph) {
1673   _num_total_subgraph_recordings = 0;
1674   _num_total_walked_objs = 0;
1675   _num_total_archived_objs = 0;
1676   _num_total_recorded_klasses = 0;
1677   _num_total_verifications = 0;
1678 
1679   // For each class X that has one or more archived fields:
1680   // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
1683   //     At runtime, these classes are initialized before X's archived fields
1684   //     are restored by HeapShared::initialize_from_archived_subgraph().
  for (int i = 0; fields[i].valid(); ) {
1687     ArchivableStaticFieldInfo* info = &fields[i];
1688     const char* klass_name = info->klass_name;
1689     start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
1690 
1691     // If you have specified consecutive fields of the same klass in
1692     // fields[], these will be archived in the same
1693     // {start_recording_subgraph ... done_recording_subgraph} pass to
1694     // save time.
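    //
    // For example (hypothetical names), two consecutive entries
    //   {"java/lang/Foo", "cacheA"},
    //   {"java/lang/Foo", "cacheB"},
    // are both recorded within one pass for java/lang/Foo.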
1695     for (; fields[i].valid(); i++) {
1696       ArchivableStaticFieldInfo* f = &fields[i];
1697       if (f->klass_name != klass_name) {
1698         break;
1699       }
1700 
1701       archive_reachable_objects_from_static_field(f->klass, f->klass_name,
1702                                                   f->offset, f->field_name,
1703                                                   is_closed_archive);
1704     }
1705     done_recording_subgraph(info->klass, klass_name);
1706   }
1707 
1708   log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
1709                       is_closed_archive ? "closed" : "open",
1710                       _num_total_subgraph_recordings);
1711   log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
1712   log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
1713   log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);
1714 
1715 #ifndef PRODUCT
1716   for (int i = 0; fields[i].valid(); i++) {
1717     ArchivableStaticFieldInfo* f = &fields[i];
1718     verify_subgraph_from_static_field(f->klass, f->offset);
1719   }
1720   log_info(cds, heap)("  Verified %d references", _num_total_verifications);
1721 #endif
1722 }
1723 
// Not all the strings in the global StringTable are dumped into the archive, because
// some of those strings may be referenced only by classes that are excluded from
// the archive. We need to explicitly mark the strings that are:
1727 //   [1] used by classes that WILL be archived;
1728 //   [2] included in the SharedArchiveConfigFile.
1729 void HeapShared::add_to_dumped_interned_strings(oop string) {
1730   assert_at_safepoint(); // DumpedInternedStrings uses raw oops
1731   bool created;
1732   _dumped_interned_strings->put_if_absent(string, true, &created);
1733 }
1734 
// At dump-time, find the locations of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers at runtime without using
// BasicOopIterateClosure.
1738 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
1739   void* _start;
1740   BitMap *_oopmap;
1741   int _num_total_oops;
1742   int _num_null_oops;
1743  public:
  FindEmbeddedNonNullPointers(void* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}
1746 
1747   virtual void do_oop(narrowOop* p) {
1748     assert(UseCompressedOops, "sanity");
1749     _num_total_oops ++;
1750     narrowOop v = *p;
1751     if (!CompressedOops::is_null(v)) {
1752       // Note: HeapShared::to_requested_address() is not necessary because
1753       // the heap always starts at a deterministic address with UseCompressedOops==true.
1754       size_t idx = p - (narrowOop*)_start;
1755       _oopmap->set_bit(idx);
1756     } else {
1757       _num_null_oops ++;
1758     }
1759   }
1760   virtual void do_oop(oop* p) {
1761     assert(!UseCompressedOops, "sanity");
1762     _num_total_oops ++;
1763     if ((*p) != NULL) {
1764       size_t idx = p - (oop*)_start;
1765       _oopmap->set_bit(idx);
1766       if (DumpSharedSpaces) {
1767         // Make heap content deterministic.
1768         *p = HeapShared::to_requested_address(*p);
1769       }
1770     } else {
1771       _num_null_oops ++;
1772     }
1773   }
1774   int num_total_oops() const { return _num_total_oops; }
1775   int num_null_oops()  const { return _num_null_oops; }
1776 };
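
// Worked example for the closure above: with UseCompressedOops, each bit of the
// oopmap covers one narrowOop slot (sizeof(narrowOop) == 4), so a non-null
// narrowOop located 12 bytes past _start sets bit 12 / 4 = 3. At runtime, the
// relocation code can visit only the marked slots instead of re-walking every object.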
1777 
1778 
1779 address HeapShared::to_requested_address(address dumptime_addr) {
1780   assert(DumpSharedSpaces, "static dump time only");
1781   if (dumptime_addr == NULL || UseCompressedOops) {
1782     return dumptime_addr;
1783   }
1784 
1785   // With UseCompressedOops==false, actual_base is selected by the OS so
1786   // it's different across -Xshare:dump runs.
1787   address actual_base = (address)G1CollectedHeap::heap()->reserved().start();
1788   address actual_end  = (address)G1CollectedHeap::heap()->reserved().end();
1789   assert(actual_base <= dumptime_addr && dumptime_addr <= actual_end, "must be an address in the heap");
1790 
1791   // We always write the objects as if the heap started at this address. This
1792   // makes the heap content deterministic.
1793   //
1794   // Note that at runtime, the heap address is also selected by the OS, so
1795   // the archive heap will not be mapped at 0x10000000. Instead, we will call
1796   // HeapShared::patch_embedded_pointers() to relocate the heap contents
1797   // accordingly.
1798   const address REQUESTED_BASE = (address)0x10000000;
1799   intx delta = REQUESTED_BASE - actual_base;
1800 
1801   address requested_addr = dumptime_addr + delta;
1802   assert(REQUESTED_BASE != 0 && requested_addr != NULL, "sanity");
1803   return requested_addr;
1804 }
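
// Worked example with illustrative numbers: if the OS happened to reserve the heap
// at actual_base = 0x00007f5c40000000, then delta = 0x10000000 - 0x00007f5c40000000,
// and an object at actual_base + 0x1000 is written out as if it lived at
// REQUESTED_BASE + 0x1000 = 0x10001000.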
1805 
1806 ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
1807   size_t num_bits = region.byte_size() / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
1808   ResourceBitMap oopmap(num_bits);
1809 
1810   HeapWord* p   = region.start();
1811   HeapWord* end = region.end();
1812   FindEmbeddedNonNullPointers finder((void*)p, &oopmap);
1813   ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL;
1814 
1815   int num_objs = 0;
1816   while (p < end) {
1817     oop o = cast_to_oop(p);
1818     o->oop_iterate(&finder);
1819     p += o->size();
1820     if (DumpSharedSpaces) {
1821       builder->relocate_klass_ptr_of_oop(o);
1822     }
1823     ++ num_objs;
1824   }
1825 
1826   log_info(cds, heap)("calculate_oopmap: objects = %6d, oop fields = %7d (nulls = %7d)",
1827                       num_objs, finder.num_total_oops(), finder.num_null_oops());
1828   return oopmap;
1829 }
1830 
1831 
1832 ResourceBitMap HeapShared::calculate_ptrmap(MemRegion region) {
1833   size_t num_bits = region.byte_size() / sizeof(Metadata*);
  ResourceBitMap ptrmap(num_bits);
1835 
1836   Metadata** start = (Metadata**)region.start();
1837   Metadata** end   = (Metadata**)region.end();
1838 
1839   int num_non_null_ptrs = 0;
1840   int len = _native_pointers->length();
1841   for (int i = 0; i < len; i++) {
1842     Metadata** p = _native_pointers->at(i);
1843     if (start <= p && p < end) {
1844       assert(*p != NULL, "must be non-null");
1845       num_non_null_ptrs ++;
1846       size_t idx = p - start;
      ptrmap.set_bit(idx);
1848     }
1849   }
1850 
1851   log_info(cds, heap)("calculate_ptrmap: marked %d non-null native pointers out of "
1852                       SIZE_FORMAT " possible locations", num_non_null_ptrs, num_bits);
1853   if (num_non_null_ptrs > 0) {
    return ptrmap;
1855   } else {
1856     return ResourceBitMap(0);
1857   }
1858 }
1859 
1860 #endif // INCLUDE_CDS_JAVA_HEAP