/*
 * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(NULL), offset(0), type(T_ILLEGAL) {}

  bool valid() {
    return klass_name != NULL;
  }
};
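
// Only klass_name and field_name are specified statically in the tables below;
// klass, offset and type start out unset and are resolved at dump time, when
// the named class and field are looked up.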

bool HeapShared::_disable_writing = false;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
GrowableArrayCHeap<Metadata**, mtClassShared>* HeapShared::_native_pointers = NULL;

size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = NULL;
static const char* _test_class_name = NULL;
static const Klass* _test_class = NULL;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = NULL;
#endif


//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",              "archivedCache"},
  {"java/lang/Long$LongCache",                    "archivedCache"},
  {"java/lang/Byte$ByteCache",                    "archivedCache"},
  {"java/lang/Short$ShortCache",                  "archivedCache"},
  {"java/lang/Character$CharacterCache",          "archivedCache"},
  {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
  {NULL, NULL},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
  {"java/util/ImmutableCollections",              "archivedObjects"},
  {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
  {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
#ifndef PRODUCT
  {NULL, NULL}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {NULL, NULL},
};

// Entry fields for subgraphs archived in the open archive heap region (full module graph).
static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
  {"jdk/internal/module/ArchivedBootLayer",       "archivedBootLayer"},
  {"java/lang/Module$ArchivedData",               "archivedData"},
  {NULL, NULL},
};

GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL;
OopHandle HeapShared::_roots;

#ifdef ASSERT
bool HeapShared::is_archived_object_during_dumptime(oop p) {
  assert(HeapShared::can_write(), "must be");
  assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
  return Universe::heap()->is_archived_object(p);
}
#endif

static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
      return true;
    }
  }
  return false;
}

bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  return is_subgraph_root_class_of(closed_archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(open_archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(fmg_open_archive_subgraph_entry_fields, ik);
}

unsigned HeapShared::oop_hash(oop const& p) {
  // Do not call p->identity_hash() as that will update the
  // object header.
  return primitive_hash(cast_from_oop<intptr_t>(p));
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != NULL) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != NULL) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}
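
// For reference, a core-library class that participates in this protocol
// declares a private no-arg void method named "resetArchivedStates", roughly
// like the following (hypothetical Java sketch; the actual bodies in the JDK
// differ):
//
//     private void resetArchivedStates() {
//         // drop any state that must not be captured in the archive
//     }
//
// Since reset_states() walks the superclass chain, each class in the
// hierarchy may declare its own copy of the method.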

void HeapShared::reset_archived_object_states(TRAPS) {
  assert(DumpSharedSpaces, "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), CHECK);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), CHECK);

  // Clean up jdk.internal.loader.ClassLoaders::bootLoader(), which is not
  // directly used for class loading, but rather is used by the core library
  // to keep track of resources, etc., loaded by the null class loader.
  //
  // Note, this object is non-null, and is not the same as
  // ClassLoaderData::the_null_class_loader_data()->class_loader(),
  // which is null.
  log_debug(cds)("Resetting boot loader");
  JavaValue result(T_OBJECT);
  JavaCalls::call_static(&result,
                         vmClasses::jdk_internal_loader_ClassLoaders_klass(),
                         vmSymbols::bootLoader_name(),
                         vmSymbols::void_BuiltinClassLoader_signature(),
                         CHECK);
  Handle boot_loader(THREAD, result.get_oop());
  reset_states(boot_loader(), CHECK);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
HeapShared::OriginalObjectTable* HeapShared::_original_object_table = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  CachedOopInfo* p = cache->get(obj);
  if (p != NULL) {
    return p->_obj;
  } else {
    return NULL;
  }
}

int HeapShared::append_root(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == NULL) {
    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  }

  return _pending_roots->append(obj);
}
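
// The index returned by append_root() is the object's permanent slot in the
// archived roots array: it is the value recorded in the archive (e.g. by
// KlassSubGraphInfo::add_subgraph_entry_field()) and later passed to
// get_root() at runtime.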

objArrayOop HeapShared::roots() {
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!HeapShared::can_write()) {
      return NULL;
    }
  } else {
    assert(UseSharedSpaces, "must be");
  }

  objArrayOop roots = (objArrayOop)_roots.resolve();
  assert(roots != NULL, "should have been initialized");
  return roots;
}

// Returns the archived object root at the given index, optionally clearing the slot.
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    assert(_pending_roots != NULL, "sanity");
    return _pending_roots->at(index);
  } else {
    assert(UseSharedSpaces, "must be");
    assert(!_roots.is_empty(), "must have loaded shared heap");
    oop result = roots()->obj_at(index);
    if (clear) {
      clear_root(index);
    }
    return result;
  }
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(UseSharedSpaces, "must be");
  if (ArchiveHeapLoader::is_fully_available()) {
    if (log_is_enabled(Debug, cds, heap)) {
      oop old = roots()->obj_at(index);
      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    roots()->obj_at_put(index, NULL);
  }
}
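
// Once a root has been consumed (e.g. stored into a mirror field by
// init_archived_fields_for()), callers pass clear=true so that the slot is
// nulled out and the object is no longer kept alive by the roots array.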

oop HeapShared::archive_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
  if (archived_oop != NULL) {
    count_allocation(len);
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
    if (!(EnableValhalla && obj->mark().is_inline_type())) {
      int hash_original = obj->identity_hash();
      archived_oop->set_mark(archived_oop->klass()->prototype_header().copy_set_hash(hash_original));
      assert(archived_oop->mark().is_unlocked(), "sanity");

      DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
      assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);
    }

    ArchivedObjectCache* cache = archived_object_cache();
    CachedOopInfo info = make_cached_oop_info(archived_oop);
    cache->put(obj, info);
    if (_original_object_table != NULL) {
      _original_object_table->put(archived_oop, obj);
    }
    mark_native_pointers(obj, archived_oop);
    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
    }
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    log_error(cds)("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
        SIZE_FORMAT "M", MaxHeapSize/M);
    os::_exit(-1);
  }
  return archived_oop;
}

void HeapShared::archive_klass_objects() {
  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = ArchiveBuilder::get_buffered_klass(klasses->at(i));

    // archive mirror object
    java_lang_Class::archive_mirror(k);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references();
    }
  }
}

void HeapShared::mark_native_pointers(oop orig_obj, oop archived_obj) {
  if (java_lang_Class::is_instance(orig_obj)) {
    mark_one_native_pointer(archived_obj, java_lang_Class::klass_offset());
    mark_one_native_pointer(archived_obj, java_lang_Class::array_klass_offset());
  }
}

void HeapShared::mark_one_native_pointer(oop archived_obj, int offset) {
  Metadata* ptr = archived_obj->metadata_field_acquire(offset);
  if (ptr != NULL) {
    // Set the native pointer to the requested address (at runtime, if the metadata
    // is mapped at the default location, it will be at this address).
    address buffer_addr = ArchiveBuilder::current()->get_buffered_addr((address)ptr);
    address requested_addr = ArchiveBuilder::current()->to_requested(buffer_addr);
    archived_obj->metadata_field_put(offset, (Metadata*)requested_addr);

    // Remember this pointer. At runtime, if the metadata is mapped at a non-default
    // location, the pointer needs to be patched (see ArchiveHeapLoader::patch_native_pointers()).
    _native_pointers->append(archived_obj->field_addr<Metadata*>(offset));

    log_debug(cds, heap, mirror)(
        "Marked metadata field at %d: " PTR_FORMAT " ==> " PTR_FORMAT,
         offset, p2i(ptr), p2i(requested_addr));
  }
}

// -- Handling of Enum objects
// Java Enum classes have synthetic <clinit> methods that look like this
//     enum MyEnum {FOO, BAR}
//     MyEnum::<clinit> {
//        /*static final MyEnum*/ MyEnum::FOO = new MyEnum("FOO");
//        /*static final MyEnum*/ MyEnum::BAR = new MyEnum("BAR");
//     }
//
// If the MyEnum::FOO object is referenced by any of the archived subgraphs, we must
// ensure the archived value is the same object (by address) as the runtime value of
// MyEnum::FOO.
//
// However, since MyEnum::<clinit> is synthetically generated by javac, there's
// no way of programmatically handling this inside the Java code (as you would handle
// ModuleLayer::EMPTY_LAYER, for example).
//
// Instead, we archive all static fields of such Enum classes. At runtime,
// HeapShared::initialize_enum_klass() will skip the <clinit> method and pull
// the static fields out of the archived heap.
void HeapShared::check_enum_obj(int level,
                                KlassSubGraphInfo* subgraph_info,
                                oop orig_obj,
                                bool is_closed_archive) {
  Klass* k = orig_obj->klass();
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
  if (!k->is_instance_klass()) {
    return;
  }
  InstanceKlass* ik = InstanceKlass::cast(k);
  if (ik->java_super() == vmClasses::Enum_klass() && !ik->has_archived_enum_objs()) {
    ResourceMark rm;
    ik->set_has_archived_enum_objs();
    buffered_k->set_has_archived_enum_objs();
    oop mirror = ik->java_mirror();

    for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
      if (fs.access_flags().is_static()) {
        fieldDescriptor& fd = fs.field_descriptor();
        if (fd.field_type() != T_OBJECT && fd.field_type() != T_ARRAY) {
          guarantee(false, "static field %s::%s must be T_OBJECT or T_ARRAY",
                    ik->external_name(), fd.name()->as_C_string());
        }
        oop oop_field = mirror->obj_field(fd.offset());
        if (oop_field == NULL) {
          guarantee(false, "static field %s::%s must not be null",
                    ik->external_name(), fd.name()->as_C_string());
        } else if (oop_field->klass() != ik && oop_field->klass() != ik->array_klass_or_null()) {
          guarantee(false, "static field %s::%s is of the wrong type",
                    ik->external_name(), fd.name()->as_C_string());
        }
        oop archived_oop_field = archive_reachable_objects_from(level, subgraph_info, oop_field, is_closed_archive);
        int root_index = append_root(archived_oop_field);
        log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT " -> " INTPTR_FORMAT ")",
                            root_index, ik->external_name(), fd.name()->as_C_string(),
                            p2i((oopDesc*)oop_field), p2i((oopDesc*)archived_oop_field));
        SystemDictionaryShared::add_enum_klass_static_field(ik, root_index);
      }
    }
  }
}

// See comments in HeapShared::check_enum_obj()
bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) {
  if (!ArchiveHeapLoader::is_fully_available()) {
    return false;
  }

  RunTimeClassInfo* info = RunTimeClassInfo::get_for(k);
  assert(info != NULL, "sanity");

  if (log_is_enabled(Info, cds, heap)) {
    ResourceMark rm;
    log_info(cds, heap)("Initializing Enum class: %s", k->external_name());
  }

  oop mirror = k->java_mirror();
  int i = 0;
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) {
      int root_index = info->enum_klass_static_field_root_index_at(i++);
      fieldDescriptor& fd = fs.field_descriptor();
      assert(fd.field_type() == T_OBJECT || fd.field_type() == T_ARRAY, "must be");
      mirror->obj_field_put(fd.offset(), get_root(root_index, /*clear=*/true));
    }
  }
  return true;
}
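
// Note: the JavaFieldStream iteration order above must match the order used
// by check_enum_obj() at dump time, because the root indices were recorded
// one per static field in that same order.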

void HeapShared::run_full_gc_in_vm_thread() {
  if (HeapShared::can_write()) {
    // Avoid fragmentation while archiving heap objects.
    // We do this inside a safepoint, so that no further allocation can happen after GC
    // has finished.
    if (GCLocker::is_active()) {
      // Just checking for safety ...
      // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
      // has been modified such that JNI code is executed in some cleanup threads after
      // we have finished class loading.
      log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
    } else {
      log_info(cds)("Run GC ...");
      Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
      log_info(cds)("Run GC done");
    }
  }
}

void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
                                 GrowableArray<MemRegion>* open_regions) {

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache(log_is_enabled(Info, cds, map));

    log_info(cds)("Heap range = [" PTR_FORMAT " - "  PTR_FORMAT "]",
                   UseCompressedOops ? p2i(CompressedOops::begin()) :
                                       p2i((address)G1CollectedHeap::heap()->reserved().start()),
                   UseCompressedOops ? p2i(CompressedOops::end()) :
                                       p2i((address)G1CollectedHeap::heap()->reserved().end()));
    log_info(cds)("Dumping objects to closed archive heap region ...");
    copy_closed_objects(closed_regions);

    log_info(cds)("Dumping objects to open archive heap region ...");
    copy_open_objects(open_regions);

    CDSHeapVerifier::verify();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) {
  assert(HeapShared::can_write(), "must be");

  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive(_dumped_interned_strings);

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */,
                           false /* is_full_module_graph */);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_regions,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) {
  assert(HeapShared::can_write(), "must be");

  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors();

  archive_klass_objects();

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           false /* is_full_module_graph */);
  if (MetaspaceShared::use_full_module_graph()) {
    archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
                             false /* is_closed_archive */,
                             true /* is_full_module_graph */);
    Modules::verify_archived_modules();
  }

  copy_roots();

  G1CollectedHeap::heap()->end_archive_alloc_range(open_regions,
                                                   os::vm_allocation_granularity());
}

// Copy _pending_roots into an objArray
void HeapShared::copy_roots() {
  // HeapShared::roots() points into an ObjArray in the open archive region. A portion of the
  // objects in this array are discovered during HeapShared::archive_objects(). For example,
  // in HeapShared::archive_reachable_objects_from() -> HeapShared::check_enum_obj().
  // However, HeapShared::archive_objects() happens inside a safepoint, so we can't
  // allocate a "regular" ObjArray and pass the result to HeapShared::archive_object().
  // Instead, we have to roll our own alloc/copy routine here.
  int length = _pending_roots != NULL ? _pending_roots->length() : 0;
  size_t size = objArrayOopDesc::object_size(length);
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);

  memset(mem, 0, size * BytesPerWord);
  {
    // This is copied from MemAllocator::finish
    oopDesc::set_mark(mem, k->prototype_header());
    oopDesc::release_set_klass(mem, k);
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    roots()->obj_at_put(i, _pending_roots->at(i));
  }
  log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem);
  count_allocation(roots()->size());
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the "buffered"
// address of the class.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
  assert(DumpSharedSpaces, "dump time only");
  bool created;
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
  KlassSubGraphInfo* info =
    _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(buffered_k, is_full_module_graph),
                                                  &created);
  assert(created, "must not initialize twice");
  return info;
}

KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
  assert(info != NULL, "must have been initialized");
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(
      int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new (mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
}
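
// _subgraph_entry_fields is thus a flat array of (field_offset, root_index)
// pairs; see HeapShared::init_archived_fields_for() for the matching decode
// loop at runtime.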

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in the sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new (mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_k), "must be a shared class");

  if (_k == buffered_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (buffered_k->is_instance_klass()) {
    assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class(),
          "must be boot class");
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
    check_allowed_klass(InstanceKlass::cast(orig_k));
  } else if (buffered_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(buffered_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
            "must be boot class");
      check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
    }
    if (buffered_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(buffered_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(buffered_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(buffered_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}

void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
  if (ik->module()->name() == vmSymbols::java_base()) {
    assert(ik->package() != NULL, "classes in java.base cannot be in unnamed package");
    return;
  }

#ifndef PRODUCT
  if (!ik->module()->is_named() && ik->package() == NULL) {
    // This class is loaded by ArchiveHeapTestClass
    return;
  }
  const char* extra_msg = ", or in an unnamed package of an unnamed module";
#else
  const char* extra_msg = "";
#endif

  ResourceMark rm;
  log_error(cds, heap)("Class %s not allowed in archive heap. Must be in java.base%s",
                       ik->external_name(), extra_msg);
  os::_exit(1);
}

bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
  if (k->is_objArray_klass()) {
    k = ObjArrayKlass::cast(k)->bottom_klass();
  }
  if (k->is_instance_klass()) {
    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
      ResourceMark rm;
      log_info(cds, heap)("non-early: %s", k->external_name());
      return true;
    } else {
      return false;
    }
  } else {
    return false;
  }
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;
  _is_full_module_graph = info->is_full_module_graph();

  if (_is_full_module_graph) {
    // Consider all classes referenced by the full module graph as early -- we will be
    // allocating objects of these classes during JVMTI early phase, so they cannot
    // be processed by (non-early) JVMTI ClassFileLoadHook
    _has_non_early_klasses = false;
  } else {
    _has_non_early_klasses = info->has_non_early_klasses();
  }

  if (_has_non_early_klasses) {
    ResourceMark rm;
    log_info(cds, heap)(
          "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
          _k->external_name());
  }

  // populate the entry fields
  GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      ArchiveBuilder::new_ro_array<int>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
    }
  }

  ArchivePtrMarker::mark_pointer(&_k);
  ArchivePtrMarker::mark_pointer(&_entry_field_records);
  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
      u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);
  writer.dump(&_run_time_subgraph_info_table, "subgraphs");

#ifndef PRODUCT
  if (ArchiveHeapTestClass != NULL) {
    size_t len = strlen(ArchiveHeapTestClass) + 1;
    Array<char>* array = ArchiveBuilder::new_ro_array<char>((int)len);
    strncpy(array->adr_at(0), ArchiveHeapTestClass, len);
    _archived_ArchiveHeapTestClass = array;
  }
#endif
  if (log_is_enabled(Info, cds, heap)) {
    print_stats();
  }
}

void HeapShared::serialize_root(SerializeClosure* soc) {
  oop roots_oop = NULL;

  if (soc->reading()) {
    soc->do_oop(&roots_oop); // read from archive
    assert(oopDesc::is_oop_or_null(roots_oop), "is oop");
    // Create an OopHandle only if we have actually mapped or loaded the roots
    if (roots_oop != NULL) {
      assert(ArchiveHeapLoader::is_fully_available(), "must be");
      _roots = OopHandle(Universe::vm_global(), roots_oop);
    }
  } else {
    // writing
    roots_oop = roots();
    soc->do_oop(&roots_oop); // write to archive
  }
}

void HeapShared::serialize_tables(SerializeClosure* soc) {

#ifndef PRODUCT
  soc->do_ptr((void**)&_archived_ArchiveHeapTestClass);
  if (soc->reading() && _archived_ArchiveHeapTestClass != NULL) {
    _test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
    setup_test_class(_test_class_name);
  }
#endif

  _run_time_subgraph_info_table.serialize_header(soc);
}

static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the klasses of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass()
      // should have enough information (object size, oop maps, etc) so that a GC
      // can be safely performed.
      //
      // -XX:VerifyArchivedFields=2 forces a GC to happen at such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC,  true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(UseSharedSpaces, "runtime only!");
  if (!ArchiveHeapLoader::is_fully_available()) {
    return; // nothing to do
  }
  resolve_classes_for_subgraphs(current, closed_archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, open_archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_open_archive_subgraph_entry_fields);
}

void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != NULL && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}

void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
      resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == NULL) {
    clear_archived_roots_of(k);
  }
}

void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  if (!ArchiveHeapLoader::is_fully_available()) {
    return; // nothing to do
  }

  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);

  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // None of the field values will be set if there was an exception when initializing the classes.
    // The Java code will not see any of the archived objects in the
    // subgraphs referenced from k in this case.
    return;
  }

  if (record != NULL) {
    init_archived_fields_for(k, record);
  }
}

const ArchivedKlassSubGraphInfoRecord*
HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  if (!k->is_shared()) {
    return NULL;
  }
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

#ifndef PRODUCT
  if (_test_class_name != NULL && k->name()->equals(_test_class_name) && record != NULL) {
    _test_class = k;
    _test_class_record = record;
  }
#endif

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
                            k->external_name());
      }
      return NULL;
    }

    if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
                            k->external_name());
      }
      return NULL;
    }

    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
    }

    resolve_or_init(k, do_init, CHECK_NULL);

    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* klass = klasses->at(i);
        if (!klass->is_shared()) {
          return NULL;
        }
        resolve_or_init(klass, do_init, CHECK_NULL);
      }
    }
  }

  return record;
}

void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    if (k->class_loader_data() == NULL) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}

void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
  verify_the_heap(k, "before");

  // Load the subgraph entry fields from the record and store them back to
  // the corresponding fields within the mirror.
  oop m = k->java_mirror();
  Array<int>* entry_field_records = record->entry_field_records();
  if (entry_field_records != NULL) {
    int efr_len = entry_field_records->length();
    assert(efr_len % 2 == 0, "sanity");
    for (int i = 0; i < efr_len; i += 2) {
      int field_offset = entry_field_records->at(i);
      int root_index = entry_field_records->at(i+1);
      oop v = get_root(root_index, /*clear=*/true);
      m->obj_field_put(field_offset, v);
      log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
    }

    // Done. Java code can see the archived sub-graphs referenced from k's
    // mirror after this point.
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s",
                          k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "");
    }
  }

  verify_the_heap(k, "after ");
}

void HeapShared::clear_archived_roots_of(Klass* k) {
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
  if (record != NULL) {
    Array<int>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 2 == 0, "sanity");
      for (int i = 0; i < efr_len; i += 2) {
        int root_index = entry_field_records->at(i+1);
        clear_root(root_index);
      }
    }
  }
}

class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _is_closed_archive;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;

  // The following are for maintaining a stack for determining
  // CachedOopInfo::_referrer
  static WalkOopAndArchiveClosure* _current;
  WalkOopAndArchiveClosure* _last;
 public:
  WalkOopAndArchiveClosure(int level,
                           bool is_closed_archive,
                           bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived) :
    _level(level), _is_closed_archive(is_closed_archive),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _orig_referencing_obj(orig), _archived_referencing_obj(archived) {
    _last = _current;
    _current = this;
  }
  ~WalkOopAndArchiveClosure() {
    _current = _last;
  }
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!HeapShared::is_archived_object_during_dumptime(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level,
                             _orig_referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        LogTarget(Trace, cds, heap) log;
        LogStream out(log);
        obj->print_on(&out);
      }

      oop archived = HeapShared::archive_reachable_objects_from(
          _level + 1, _subgraph_info, obj, _is_closed_archive);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
      assert(HeapShared::is_archived_object_during_dumptime(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }

 public:
  static WalkOopAndArchiveClosure* current()  { return _current;              }
  oop orig_referencing_obj()                  { return _orig_referencing_obj; }
  KlassSubGraphInfo* subgraph_info()          { return _subgraph_info;        }
};

WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = NULL;

HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop orig_obj) {
  CachedOopInfo info;
  WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();

  info._subgraph_info = (walker == NULL) ? NULL : walker->subgraph_info();
  info._referrer = (walker == NULL) ? NULL : walker->orig_referencing_obj();
  info._obj = orig_obj;

  return info;
}
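
// The referrer recorded above comes from the walker stack maintained by
// WalkOopAndArchiveClosure: it identifies the object whose field pointed to
// orig_obj, which lets diagnostic code (e.g. CDSHeapVerifier) report how an
// object was reached.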

void HeapShared::check_closed_region_object(InstanceKlass* k) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && is_reference_type(ft)) {
        ResourceMark rm;
        log_warning(cds, heap)(
          "Please check reference field in %s instance in closed archive heap region: %s %s",
          k->external_name(), (fs.name())->as_C_string(),
          (fs.signature())->as_C_string());
      }
    }
  }
}

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object_during_dumptime(orig_obj), "sanity");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    os::_exit(1);
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    os::_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_object(orig_obj);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
1251         return NULL;
1252       } else {
1253         // We don't know how to handle an object that has been archived, but some of its reachable
1254         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1255         // we have a real use case.
1256         os::_exit(1);
1257       }
1258     }
1259 
1260     if (java_lang_Module::is_instance(orig_obj)) {
1261       if (Modules::check_module_oop(orig_obj)) {
1262         Modules::update_oops_in_archived_module(orig_obj, append_root(archived_obj));
1263       }
1264       java_lang_Module::set_module_entry(archived_obj, NULL);
1265     } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
1266       // class_data will be restored explicitly at run time.
1267       guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
1268                 orig_obj == SystemDictionary::java_system_loader() ||
1269                 java_lang_ClassLoader::loader_data(orig_obj) == NULL, "must be");
1270       java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
1271     }
1272   }
1273 
1274   assert(archived_obj != NULL, "must be");
1275   Klass *orig_k = orig_obj->klass();
1276   subgraph_info->add_subgraph_object_klass(orig_k);
1277 
1278   WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
1279                                   subgraph_info, orig_obj, archived_obj);
1280   orig_obj->oop_iterate(&walker);
1281   if (is_closed_archive && orig_k->is_instance_klass()) {
1282     check_closed_region_object(InstanceKlass::cast(orig_k));
1283   }
1284 
1285   check_enum_obj(level + 1, subgraph_info, orig_obj, is_closed_archive);
1286   return archived_obj;
1287 }
1288 
1289 //
1290 // Start from the given static field in a java mirror and archive the
1291 // complete sub-graph of java heap objects that are reached directly
1292 // or indirectly from the starting object by following references.
1293 // Sub-graph archiving restrictions (current):
1294 //
1295 // - All classes of objects in the archived sub-graph (including the
1296 //   entry class) must be boot class only.
1297 // - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. A mirror can only be the sub-graph entry object.
1299 //
1300 // The Java heap object sub-graph archiving process (see
1301 // WalkOopAndArchiveClosure):
1302 //
1303 // 1) Java object sub-graph archiving starts from a given static field
1304 // within a Class instance (java mirror). If the static field is a
1305 // reference field and points to a non-null java object, proceed to
1306 // the next step.
1307 //
// 2) Archives the referenced java object. If an archived copy of the
// current object already exists, updates the pointer in the archived
// copy of the referencing object to point to the current archived object.
// Otherwise, proceeds to the next step.
//
// 3) Follows all references within the current java object and recursively
// archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
// point to the current archived object.
1318 //
1319 // 5) The Klass of the current java object is added to the list of Klasses
1320 // for loading and initializing before any object in the archived graph can
1321 // be accessed at runtime.
1322 //
1323 void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
1324                                                              const char* klass_name,
1325                                                              int field_offset,
1326                                                              const char* field_name,
1327                                                              bool is_closed_archive) {
1328   assert(DumpSharedSpaces, "dump time only");
1329   assert(k->is_shared_boot_class(), "must be boot class");
1330 
1331   oop m = k->java_mirror();
1332 
1333   KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
1334   oop f = m->obj_field(field_offset);
1335 
1336   log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));
1337 
1338   if (!CompressedOops::is_null(f)) {
1339     if (log_is_enabled(Trace, cds, heap)) {
1340       LogTarget(Trace, cds, heap) log;
1341       LogStream out(log);
1342       f->print_on(&out);
1343     }
1344 
1345     oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);
1346 
1347     if (af == NULL) {
1348       log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
1349                            klass_name, field_name);
1350     } else {
1351       // Note: the field value is not preserved in the archived mirror.
1352       // Record the field as a new subGraph entry point. The recorded
1353       // information is restored from the archive at runtime.
1354       subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
1355       log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
1356     }
1357   } else {
    // The field contains null; we still need to record the entry point
    // so it can be restored at runtime.
1360     subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
1361   }
1362 }
1363 
1364 #ifndef PRODUCT
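// Debug-only closure used by verify_reachable_objects_from() below: for each
// non-null oop field it recursively verifies that the referenced object is on
// the expected side of the archived/original divide (see the asserts there).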
1365 class VerifySharedOopClosure: public BasicOopIterateClosure {
1366  private:
1367   bool _is_archived;
1368 
1369  public:
1370   VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}
1371 
1372   void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
1373   void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }
1374 
1375  protected:
1376   template <class T> void do_oop_work(T *p) {
1377     oop obj = RawAccess<>::oop_load(p);
1378     if (!CompressedOops::is_null(obj)) {
1379       HeapShared::verify_reachable_objects_from(obj, _is_archived);
1380     }
1381   }
1382 };
1383 
1384 void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
1385   assert(DumpSharedSpaces, "dump time only");
1386   assert(k->is_shared_boot_class(), "must be boot class");
1387 
1388   oop m = k->java_mirror();
1389   oop f = m->obj_field(field_offset);
1390   if (!CompressedOops::is_null(f)) {
1391     verify_subgraph_from(f);
1392   }
1393 }
1394 
1395 void HeapShared::verify_subgraph_from(oop orig_obj) {
1396   oop archived_obj = find_archived_heap_object(orig_obj);
1397   if (archived_obj == NULL) {
1398     // It's OK for the root of a subgraph to be not archived. See comments in
1399     // archive_reachable_objects_from().
1400     return;
1401   }
1402 
1403   // Verify that all objects reachable from orig_obj are archived.
1404   init_seen_objects_table();
1405   verify_reachable_objects_from(orig_obj, false);
1406   delete_seen_objects_table();
1407 
1408   // Note: we could also verify that all objects reachable from the archived
1409   // copy of orig_obj can only point to archived objects, with:
1410   //      init_seen_objects_table();
1411   //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
1413   // but that's already done in G1HeapVerifier::verify_archive_regions so we
1414   // won't do it here.
1415 }
1416 
1417 void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
1418   _num_total_verifications ++;
1419   if (!has_been_seen_during_subgraph_recording(obj)) {
1420     set_has_been_seen_during_subgraph_recording(obj);
1421 
1422     if (is_archived) {
1423       assert(is_archived_object_during_dumptime(obj), "must be");
1424       assert(find_archived_heap_object(obj) == NULL, "must be");
1425     } else {
1426       assert(!is_archived_object_during_dumptime(obj), "must be");
1427       assert(find_archived_heap_object(obj) != NULL, "must be");
1428     }
1429 
1430     VerifySharedOopClosure walker(is_archived);
1431     obj->oop_iterate(&walker);
1432   }
1433 }
1434 #endif
1435 
1436 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
1437 int HeapShared::_num_new_walked_objs;
1438 int HeapShared::_num_new_archived_objs;
1439 int HeapShared::_num_old_recorded_klasses;
1440 
1441 int HeapShared::_num_total_subgraph_recordings = 0;
1442 int HeapShared::_num_total_walked_objs = 0;
1443 int HeapShared::_num_total_archived_objs = 0;
1444 int HeapShared::_num_total_recorded_klasses = 0;
1445 int HeapShared::_num_total_verifications = 0;
1446 
1447 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
1448   return _seen_objects_table->get(obj) != NULL;
1449 }
1450 
1451 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
1452   assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
1453   _seen_objects_table->put(obj, true);
1454   ++ _num_new_walked_objs;
1455 }
1456 
1457 void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
1458   log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
1459   init_subgraph_info(k, is_full_module_graph);
1460   init_seen_objects_table();
1461   _num_new_walked_objs = 0;
1462   _num_new_archived_objs = 0;
1463   _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
1464 }
1465 
1466 void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
1467   int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
1468     _num_old_recorded_klasses;
1469   log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
1470                       "walked %d objs, archived %d new objs, recorded %d classes",
1471                       class_name, _num_new_walked_objs, _num_new_archived_objs,
1472                       num_new_recorded_klasses);
1473 
1474   delete_seen_objects_table();
1475 
1476   _num_total_subgraph_recordings ++;
1477   _num_total_walked_objs      += _num_new_walked_objs;
1478   _num_total_archived_objs    += _num_new_archived_objs;
1479   _num_total_recorded_klasses +=  num_new_recorded_klasses;
1480 }
1481 
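// Finds the offset of a named static reference field among an InstanceKlass's
// local static fields; init_subgraph_entry_fields() uses it to turn a
// {klass_name, field_name} pair into a field offset.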
1482 class ArchivableStaticFieldFinder: public FieldClosure {
1483   InstanceKlass* _ik;
1484   Symbol* _field_name;
1485   bool _found;
1486   int _offset;
1487 public:
1488   ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
1489     _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}
1490 
1491   virtual void do_field(fieldDescriptor* fd) {
1492     if (fd->name() == _field_name) {
1493       assert(!_found, "fields can never be overloaded");
1494       if (is_reference_type(fd->field_type())) {
1495         _found = true;
1496         _offset = fd->offset();
1497       }
1498     }
1499   }
1500   bool found()     { return _found;  }
1501   int offset()     { return _offset; }
1502 };
1503 
1504 void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
1505                                             TRAPS) {
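  // fields[] is terminated by a sentinel entry for which valid() is false;
  // each valid row names a boot class and one of its static reference fields.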
1506   for (int i = 0; fields[i].valid(); i++) {
1507     ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);
1510     ResourceMark rm; // for stringStream::as_string() etc.
1511 
1512 #ifndef PRODUCT
1513     bool is_test_class = (ArchiveHeapTestClass != NULL) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
1514 #else
1515     bool is_test_class = false;
1516 #endif
1517 
1518     if (is_test_class) {
1519       log_warning(cds)("Loading ArchiveHeapTestClass %s ...", ArchiveHeapTestClass);
1520     }
1521 
1522     Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, THREAD);
1523     if (HAS_PENDING_EXCEPTION) {
1524       CLEAR_PENDING_EXCEPTION;
1525       stringStream st;
1526       st.print("Fail to initialize archive heap: %s cannot be loaded by the boot loader", info->klass_name);
1527       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1528     }
1529 
1530     if (!k->is_instance_klass()) {
1531       stringStream st;
1532       st.print("Fail to initialize archive heap: %s is not an instance class", info->klass_name);
1533       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1534     }
1535 
1536     InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(), "Only support boot classes");
1539 
1540     if (is_test_class) {
1541       if (ik->module()->is_named()) {
        // We don't want ArchiveHeapTestClass to be abused to easily load/initialize arbitrary
        // core-lib classes. At a minimum, the test class must be appended to the bootclasspath.
1544         stringStream st;
1545         st.print("ArchiveHeapTestClass %s is not in unnamed module", ArchiveHeapTestClass);
1546         THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1547       }
1548 
1549       if (ik->package() != NULL) {
1550         // This restriction makes HeapShared::is_a_test_class_in_unnamed_module() easy.
1551         stringStream st;
1552         st.print("ArchiveHeapTestClass %s is not in unnamed package", ArchiveHeapTestClass);
1553         THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1554       }
1555     } else {
1556       if (ik->module()->name() != vmSymbols::java_base()) {
1557         // We don't want to deal with cases when a module is unavailable at runtime.
1558         // FUTURE -- load from archived heap only when module graph has not changed
1559         //           between dump and runtime.
1560         stringStream st;
1561         st.print("%s is not in java.base module", info->klass_name);
1562         THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1563       }
1564     }
1565 
1566     if (is_test_class) {
1567       log_warning(cds)("Initializing ArchiveHeapTestClass %s ...", ArchiveHeapTestClass);
1568     }
1569     ik->initialize(CHECK);
1570 
1571     ArchivableStaticFieldFinder finder(ik, field_name);
1572     ik->do_local_static_fields(&finder);
1573     if (!finder.found()) {
1574       stringStream st;
1575       st.print("Unable to find the static T_OBJECT field %s::%s", info->klass_name, info->field_name);
1576       THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1577     }
1578 
1579     info->klass = ik;
1580     info->offset = finder.offset();
1581   }
1582 }
1583 
1584 void HeapShared::init_subgraph_entry_fields(TRAPS) {
1585   assert(HeapShared::can_write(), "must be");
1586   _dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
1587   init_subgraph_entry_fields(closed_archive_subgraph_entry_fields, CHECK);
1588   init_subgraph_entry_fields(open_archive_subgraph_entry_fields, CHECK);
1589   if (MetaspaceShared::use_full_module_graph()) {
1590     init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields, CHECK);
1591   }
1592 }
1593 
1594 #ifndef PRODUCT
1595 void HeapShared::setup_test_class(const char* test_class_name) {
1596   ArchivableStaticFieldInfo* p = open_archive_subgraph_entry_fields;
1597   int num_slots = sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
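  // A sketch of the expected tail of the table (rows shown are illustrative):
  //
  //   static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  //     ...,
  //     {NULL, NULL},   // patched below with the test class and field name
  //     {NULL, NULL},   // end-of-list marker; valid() returns false
  //   };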
  assert(p[num_slots - 2].klass_name == NULL, "must have an empty slot that's patched below");
  assert(p[num_slots - 1].klass_name == NULL, "must have an empty slot that marks the end of the list");
1600 
1601   if (test_class_name != NULL) {
1602     p[num_slots - 2].klass_name = test_class_name;
1603     p[num_slots - 2].field_name = ARCHIVE_TEST_FIELD_NAME;
1604   }
1605 }
1606 
// See if ik is one of the test classes that were pulled in by -XX:ArchiveHeapTestClass.
// This may be called at run time before the module system is initialized, so
// we cannot rely on InstanceKlass::module(), etc.
1610 bool HeapShared::is_a_test_class_in_unnamed_module(Klass* ik) {
1611   if (_test_class != NULL) {
1612     if (ik == _test_class) {
1613       return true;
1614     }
1615     Array<Klass*>* klasses = _test_class_record->subgraph_object_klasses();
1616     if (klasses == NULL) {
1617       return false;
1618     }
1619 
1620     for (int i = 0; i < klasses->length(); i++) {
1621       Klass* k = klasses->at(i);
1622       if (k == ik) {
1623         Symbol* name;
1624         if (k->is_instance_klass()) {
1625           name = InstanceKlass::cast(k)->name();
1626         } else if (k->is_objArray_klass()) {
1627           Klass* bk = ObjArrayKlass::cast(k)->bottom_klass();
1628           if (!bk->is_instance_klass()) {
1629             return false;
1630           }
1631           name = bk->name();
1632         } else {
1633           return false;
1634         }
1635 
1636         // See KlassSubGraphInfo::check_allowed_klass() - only two types of
1637         // classes are allowed:
1638         //   (A) java.base classes (which must not be in the unnamed module)
1639         //   (B) test classes which must be in the unnamed package of the unnamed module.
1640         // So if we see a '/' character in the class name, it must be in (A);
1641         // otherwise it must be in (B).
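        // For example, "java/lang/String" contains '/' and is rejected as (A),
        // while a top-level test class named, say, "MyHeapTest" (hypothetical)
        // has no '/' and is accepted as (B).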
        if (name->index_of_at(0, "/", 1) >= 0) {
1643           return false; // (A)
1644         }
1645 
1646         return true; // (B)
1647       }
1648     }
1649   }
1650 
1651   return false;
1652 }
1653 #endif
1654 
1655 void HeapShared::init_for_dumping(TRAPS) {
1656   if (HeapShared::can_write()) {
1657     setup_test_class(ArchiveHeapTestClass);
1658     _dumped_interned_strings = new (mtClass)DumpedInternedStrings();
1659     _native_pointers = new GrowableArrayCHeap<Metadata**, mtClassShared>(2048);
1660     init_subgraph_entry_fields(CHECK);
1661   }
1662 }
1663 
1664 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
1665                                           bool is_closed_archive,
1666                                           bool is_full_module_graph) {
1667   _num_total_subgraph_recordings = 0;
1668   _num_total_walked_objs = 0;
1669   _num_total_archived_objs = 0;
1670   _num_total_recorded_klasses = 0;
1671   _num_total_verifications = 0;
1672 
1673   // For each class X that has one or more archived fields:
1674   // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
1676   //     by any of these static fields.
1677   //     At runtime, these classes are initialized before X's archived fields
1678   //     are restored by HeapShared::initialize_from_archived_subgraph().
  for (int i = 0; fields[i].valid(); ) {
1681     ArchivableStaticFieldInfo* info = &fields[i];
1682     const char* klass_name = info->klass_name;
1683     start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
1684 
1685     // If you have specified consecutive fields of the same klass in
1686     // fields[], these will be archived in the same
1687     // {start_recording_subgraph ... done_recording_subgraph} pass to
1688     // save time.
1689     for (; fields[i].valid(); i++) {
1690       ArchivableStaticFieldInfo* f = &fields[i];
1691       if (f->klass_name != klass_name) {
1692         break;
1693       }
1694 
1695       archive_reachable_objects_from_static_field(f->klass, f->klass_name,
1696                                                   f->offset, f->field_name,
1697                                                   is_closed_archive);
1698     }
1699     done_recording_subgraph(info->klass, klass_name);
1700   }
1701 
1702   log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
1703                       is_closed_archive ? "closed" : "open",
1704                       _num_total_subgraph_recordings);
1705   log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
1706   log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
1707   log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);
1708 
1709 #ifndef PRODUCT
1710   for (int i = 0; fields[i].valid(); i++) {
1711     ArchivableStaticFieldInfo* f = &fields[i];
1712     verify_subgraph_from_static_field(f->klass, f->offset);
1713   }
1714   log_info(cds, heap)("  Verified %d references", _num_total_verifications);
1715 #endif
1716 }
1717 
1718 // Not all the strings in the global StringTable are dumped into the archive, because
1719 // some of those strings may be only referenced by classes that are excluded from
1720 // the archive. We need to explicitly mark the strings that are:
1721 //   [1] used by classes that WILL be archived;
1722 //   [2] included in the SharedArchiveConfigFile.
1723 void HeapShared::add_to_dumped_interned_strings(oop string) {
1724   assert_at_safepoint(); // DumpedInternedStrings uses raw oops
1725   bool created;
1726   _dumped_interned_strings->put_if_absent(string, true, &created);
1727 }
1728 
1729 // At dump-time, find the location of all the non-null oop pointers in an archived heap
1730 // region. This way we can quickly relocate all the pointers without using
1731 // BasicOopIterateClosure at runtime.
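// With UseCompressedOops, bit N in the oopmap corresponds to the narrowOop at
// (narrowOop*)start + N; with uncompressed oops it is the oop at (oop*)start + N.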
1732 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
1733   void* _start;
1734   BitMap *_oopmap;
1735   int _num_total_oops;
1736   int _num_null_oops;
1737  public:
1738   FindEmbeddedNonNullPointers(void* start, BitMap* oopmap)
1739     : _start(start), _oopmap(oopmap), _num_total_oops(0),  _num_null_oops(0) {}
1740 
1741   virtual void do_oop(narrowOop* p) {
1742     assert(UseCompressedOops, "sanity");
1743     _num_total_oops ++;
1744     narrowOop v = *p;
1745     if (!CompressedOops::is_null(v)) {
1746       // Note: HeapShared::to_requested_address() is not necessary because
1747       // the heap always starts at a deterministic address with UseCompressedOops==true.
1748       size_t idx = p - (narrowOop*)_start;
1749       _oopmap->set_bit(idx);
1750     } else {
1751       _num_null_oops ++;
1752     }
1753   }
1754   virtual void do_oop(oop* p) {
1755     assert(!UseCompressedOops, "sanity");
1756     _num_total_oops ++;
1757     if ((*p) != NULL) {
1758       size_t idx = p - (oop*)_start;
1759       _oopmap->set_bit(idx);
1760       if (DumpSharedSpaces) {
1761         // Make heap content deterministic.
1762         *p = HeapShared::to_requested_address(*p);
1763       }
1764     } else {
1765       _num_null_oops ++;
1766     }
1767   }
1768   int num_total_oops() const { return _num_total_oops; }
1769   int num_null_oops()  const { return _num_null_oops; }
1770 };
1771 
1772 
1773 address HeapShared::to_requested_address(address dumptime_addr) {
1774   assert(DumpSharedSpaces, "static dump time only");
1775   if (dumptime_addr == NULL || UseCompressedOops) {
1776     return dumptime_addr;
1777   }
1778 
1779   // With UseCompressedOops==false, actual_base is selected by the OS so
1780   // it's different across -Xshare:dump runs.
1781   address actual_base = (address)G1CollectedHeap::heap()->reserved().start();
1782   address actual_end  = (address)G1CollectedHeap::heap()->reserved().end();
1783   assert(actual_base <= dumptime_addr && dumptime_addr <= actual_end, "must be an address in the heap");
1784 
1785   // We always write the objects as if the heap started at this address. This
1786   // makes the heap content deterministic.
1787   //
1788   // Note that at runtime, the heap address is also selected by the OS, so
1789   // the archive heap will not be mapped at 0x10000000. Instead, we will call
1790   // HeapShared::patch_embedded_pointers() to relocate the heap contents
1791   // accordingly.
1792   const address REQUESTED_BASE = (address)0x10000000;
1793   intx delta = REQUESTED_BASE - actual_base;
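  // For example (hypothetical addresses): with actual_base == 0x7f8000000000
  // and dumptime_addr == 0x7f8000001000, requested_addr is 0x10001000, i.e.
  // REQUESTED_BASE plus the object's offset within the actual heap.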
1794 
1795   address requested_addr = dumptime_addr + delta;
1796   assert(REQUESTED_BASE != 0 && requested_addr != NULL, "sanity");
1797   return requested_addr;
1798 }
1799 
1800 ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
1801   size_t num_bits = region.byte_size() / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
1802   ResourceBitMap oopmap(num_bits);
1803 
1804   HeapWord* p   = region.start();
1805   HeapWord* end = region.end();
1806   FindEmbeddedNonNullPointers finder((void*)p, &oopmap);
1807   ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL;
1808 
1809   int num_objs = 0;
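  // Walk the objects in address order. Besides setting oopmap bits, the
  // closure also rewrites uncompressed oop fields to their "requested"
  // addresses at static dump time, and relocate_klass_ptr_of_oop() relocates
  // each object's klass pointer.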
1810   while (p < end) {
1811     oop o = cast_to_oop(p);
1812     o->oop_iterate(&finder);
1813     p += o->size();
1814     if (DumpSharedSpaces) {
1815       builder->relocate_klass_ptr_of_oop(o);
1816     }
1817     ++ num_objs;
1818   }
1819 
1820   log_info(cds, heap)("calculate_oopmap: objects = %6d, oop fields = %7d (nulls = %7d)",
1821                       num_objs, finder.num_total_oops(), finder.num_null_oops());
1822   return oopmap;
1823 }
1824 
1825 
1826 ResourceBitMap HeapShared::calculate_ptrmap(MemRegion region) {
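  // One bit per Metadata* slot in the region; a set bit marks a non-null
  // native (Metadata) pointer that was recorded in _native_pointers during dumping.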
1827   size_t num_bits = region.byte_size() / sizeof(Metadata*);
  ResourceBitMap ptrmap(num_bits);
1829 
1830   Metadata** start = (Metadata**)region.start();
1831   Metadata** end   = (Metadata**)region.end();
1832 
1833   int num_non_null_ptrs = 0;
1834   int len = _native_pointers->length();
1835   for (int i = 0; i < len; i++) {
1836     Metadata** p = _native_pointers->at(i);
1837     if (start <= p && p < end) {
1838       assert(*p != NULL, "must be non-null");
1839       num_non_null_ptrs ++;
1840       size_t idx = p - start;
      ptrmap.set_bit(idx);
1842     }
1843   }
1844 
1845   log_info(cds, heap)("calculate_ptrmap: marked %d non-null native pointers out of "
1846                       SIZE_FORMAT " possible locations", num_non_null_ptrs, num_bits);
1847   if (num_non_null_ptrs > 0) {
    return ptrmap;
1849   } else {
1850     return ResourceBitMap(0);
1851   }
1852 }
1853 
1854 void HeapShared::count_allocation(size_t size) {
1855   _total_obj_count ++;
1856   _total_obj_size += size;
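  // Power-of-two histogram: slot i counts objects of size <= 2^i heap words
  // (e.g. a 6-word object lands in slot 3, since 2^3 = 8 >= 6). Objects too
  // large for any slot are counted only in the totals above and show up as
  // "huge" in print_stats().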
1857   for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
1858     if (size <= (size_t(1) << i)) {
1859       _alloc_count[i] ++;
1860       _alloc_size[i] += size;
1861       return;
1862     }
1863   }
1864 }
1865 
1866 static double avg_size(size_t size, size_t count) {
1867   double avg = 0;
1868   if (count > 0) {
1869     avg = double(size * HeapWordSize) / double(count);
1870   }
1871   return avg;
1872 }
1873 
1874 void HeapShared::print_stats() {
1875   size_t huge_count = _total_obj_count;
1876   size_t huge_size = _total_obj_size;
1877 
1878   for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
1879     size_t byte_size_limit = (size_t(1) << i) * HeapWordSize;
1880     size_t count = _alloc_count[i];
1881     size_t size = _alloc_size[i];
1882     log_info(cds, heap)(SIZE_FORMAT_W(8) " objects are <= " SIZE_FORMAT_W(-6)
1883                         " bytes (total " SIZE_FORMAT_W(8) " bytes, avg %8.1f bytes)",
1884                         count, byte_size_limit, size * HeapWordSize, avg_size(size, count));
1885     huge_count -= count;
1886     huge_size -= size;
1887   }
1888 
1889   log_info(cds, heap)(SIZE_FORMAT_W(8) " huge  objects               (total "  SIZE_FORMAT_W(8) " bytes"
1890                       ", avg %8.1f bytes)",
1891                       huge_count, huge_size * HeapWordSize,
1892                       avg_size(huge_size, huge_count));
1893   log_info(cds, heap)(SIZE_FORMAT_W(8) " total objects               (total "  SIZE_FORMAT_W(8) " bytes"
1894                       ", avg %8.1f bytes)",
1895                       _total_obj_count, _total_obj_size * HeapWordSize,
1896                       avg_size(_total_obj_size, _total_obj_count));
1897 }
1898 
1899 #endif // INCLUDE_CDS_JAVA_HEAP