src/hotspot/share/cds/heapShared.cpp

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/aotArtifactFinder.hpp"
  26 #include "cds/aotClassInitializer.hpp"
  27 #include "cds/aotClassLocation.hpp"
  28 #include "cds/aotLogging.hpp"
  29 #include "cds/aotReferenceObjSupport.hpp"
  30 #include "cds/archiveBuilder.hpp"
  31 #include "cds/archiveHeapLoader.hpp"
  32 #include "cds/archiveHeapWriter.hpp"
  33 #include "cds/archiveUtils.hpp"
  34 #include "cds/cdsConfig.hpp"
  35 #include "cds/cdsEnumKlass.hpp"
  36 #include "cds/cdsHeapVerifier.hpp"
  37 #include "cds/heapShared.hpp"
  38 #include "cds/metaspaceShared.hpp"
  39 #include "classfile/classLoaderData.hpp"
  40 #include "classfile/classLoaderExt.hpp"
  41 #include "classfile/javaClasses.inline.hpp"
  42 #include "classfile/modules.hpp"
  43 #include "classfile/stringTable.hpp"
  44 #include "classfile/symbolTable.hpp"
  45 #include "classfile/systemDictionary.hpp"
  46 #include "classfile/systemDictionaryShared.hpp"
  47 #include "classfile/vmClasses.hpp"
  48 #include "classfile/vmSymbols.hpp"
  49 #include "gc/shared/collectedHeap.hpp"
  50 #include "gc/shared/gcLocker.hpp"
  51 #include "gc/shared/gcVMOperations.hpp"
  52 #include "logging/log.hpp"
  53 #include "logging/logStream.hpp"
  54 #include "memory/iterator.inline.hpp"
  55 #include "memory/resourceArea.hpp"
  56 #include "memory/universe.hpp"
  57 #include "oops/compressedOops.inline.hpp"
  58 #include "oops/fieldStreams.inline.hpp"
  59 #include "oops/objArrayOop.inline.hpp"
  60 #include "oops/oop.inline.hpp"
  61 #include "oops/typeArrayOop.inline.hpp"
  62 #include "prims/jvmtiExport.hpp"
  63 #include "runtime/arguments.hpp"
  64 #include "runtime/fieldDescriptor.inline.hpp"
  65 #include "runtime/init.hpp"
  66 #include "runtime/javaCalls.hpp"
  67 #include "runtime/mutexLocker.hpp"
  68 #include "runtime/safepointVerifiers.hpp"
  69 #include "utilities/bitMap.inline.hpp"
  70 #include "utilities/copy.hpp"
  71 #if INCLUDE_G1GC
  72 #include "gc/g1/g1CollectedHeap.hpp"
  73 #endif
  74 
  75 #if INCLUDE_CDS_JAVA_HEAP
  76 
  77 struct ArchivableStaticFieldInfo {
  78   const char* klass_name;
  79   const char* field_name;
  80   InstanceKlass* klass;
  81   int offset;
  82   BasicType type;
  83 
  84   ArchivableStaticFieldInfo(const char* k, const char* f)
  85   : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}
  86 
  87   bool valid() {
  88     return klass_name != nullptr;
  89   }
  90 };
  91 
  92 DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
  93 
  94 size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
  95 size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
  96 size_t HeapShared::_total_obj_count;
  97 size_t HeapShared::_total_obj_size;
  98 
  99 #ifndef PRODUCT
 100 #define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
 101 static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
 102 static const char* _test_class_name = nullptr;
 103 static Klass* _test_class = nullptr;
 104 static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
 105 #endif
 106 
 107 
 108 //
 109 // If you add new entries to the following tables, you should know what you're doing!
 110 //
 111 
 112 static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
 113   {"java/lang/Integer$IntegerCache",              "archivedCache"},
 114   {"java/lang/Long$LongCache",                    "archivedCache"},
 115   {"java/lang/Byte$ByteCache",                    "archivedCache"},
 116   {"java/lang/Short$ShortCache",                  "archivedCache"},
 117   {"java/lang/Character$CharacterCache",          "archivedCache"},
 118   {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
 119   {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
 120   {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
 121   {"java/util/ImmutableCollections",              "archivedObjects"},
 122   {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
 123   {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
 124   {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
 125 
 126 #ifndef PRODUCT
 127   {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
 128 #endif
 129   {nullptr, nullptr},
 130 };
 131 
 132 // full module graph
 133 static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
 134   {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
 135   {ARCHIVED_BOOT_LAYER_CLASS,                     ARCHIVED_BOOT_LAYER_FIELD},
 136   {"java/lang/Module$ArchivedData",               "archivedData"},
 137   {nullptr, nullptr},
 138 };
 139 
 140 KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
 141 ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
 142 GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
 143 GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
 144 int HeapShared::_root_segment_max_size_elems;
 145 OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
 146 MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
 147 
 148 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
 149   for (int i = 0; fields[i].valid(); i++) {
 150     if (fields[i].klass == ik) {
 151       return true;
 152     }
 153   }
 154   return false;
 155 }
 156 
 157 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
 158   return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
 159          is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
 160 }
 161 
 162 unsigned HeapShared::oop_hash(oop const& p) {
 163   // Do not call p->identity_hash() as that will update the
 164   // object header.
 165   return primitive_hash(cast_from_oop<intptr_t>(p));
 166 }
 167 
 168 static void reset_states(oop obj, TRAPS) {
 169   Handle h_obj(THREAD, obj);
 170   InstanceKlass* klass = InstanceKlass::cast(obj->klass());
 171   TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
 172   Symbol* method_sig = vmSymbols::void_method_signature();
 173 
 174   while (klass != nullptr) {
 175     Method* method = klass->find_method(method_name, method_sig);
 176     if (method != nullptr) {
 177       assert(method->is_private(), "must be");
 178       if (log_is_enabled(Debug, aot)) {
 179         ResourceMark rm(THREAD);
 180         log_debug(aot)("  calling %s", method->name_and_sig_as_C_string());
 181       }
 182       JavaValue result(T_VOID);
 183       JavaCalls::call_special(&result, h_obj, klass,
 184                               method_name, method_sig, CHECK);
 185     }
 186     klass = klass->java_super();
 187   }

 199   // to keep track of resources, etc, loaded by the null class loader.
 200   //
 201   // Note, this object is non-null, and is not the same as
 202   // ClassLoaderData::the_null_class_loader_data()->class_loader(),
 203   // which is null.
 204   log_debug(aot)("Resetting boot loader");
 205   JavaValue result(T_OBJECT);
 206   JavaCalls::call_static(&result,
 207                          vmClasses::jdk_internal_loader_ClassLoaders_klass(),
 208                          vmSymbols::bootLoader_name(),
 209                          vmSymbols::void_BuiltinClassLoader_signature(),
 210                          CHECK);
 211   Handle boot_loader(THREAD, result.get_oop());
 212   reset_states(boot_loader(), CHECK);
 213 }
 214 
 215 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
 216 
 217 bool HeapShared::has_been_archived(oop obj) {
 218   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 219   return archived_object_cache()->get(obj) != nullptr;
 220 }
 221 
 222 int HeapShared::append_root(oop obj) {
 223   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 224   if (obj != nullptr) {
 225     assert(has_been_archived(obj), "must be");
 226   }
 227   // No GC should happen since we aren't scanning _pending_roots.
 228   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 229 
 230   return _pending_roots->append(obj);
 231 }
 232 
 233 objArrayOop HeapShared::root_segment(int segment_idx) {
 234   if (CDSConfig::is_dumping_heap()) {
 235     assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 236   } else {
 237     assert(CDSConfig::is_using_archive(), "must be");
 238   }
 239 
 240   objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
 241   assert(segment != nullptr, "should have been initialized");
 242   return segment;
 243 }
 244 
 245 void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
 246   assert(_root_segment_max_size_elems > 0, "sanity");
 247 
 248   // Try to avoid divisions for the common case.
 249   if (idx < _root_segment_max_size_elems) {
 250     seg_idx = 0;
 251     int_idx = idx;
 252   } else {
 253     seg_idx = idx / _root_segment_max_size_elems;
 254     int_idx = idx % _root_segment_max_size_elems;
 255   }
 256 
 257   assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
 258          "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
 259 }
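
// Example, assuming _root_segment_max_size_elems == 1000: idx == 2500 maps to
// seg_idx == 2500 / 1000 == 2 and int_idx == 2500 % 1000 == 500, and the
// round trip 2 * 1000 + 500 == 2500 holds. Any idx below 1000 skips the
// division entirely and stays in segment 0.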
 260 
  261 // Returns the root object at the given index from the archived root segments
 262 oop HeapShared::get_root(int index, bool clear) {
 263   assert(index >= 0, "sanity");
 264   assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");

 286   }
 287 }
 288 
 289 bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info) {
 290   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 291 
 292   assert(!obj->is_stackChunk(), "do not archive stack chunks");
 293   if (has_been_archived(obj)) {
 294     return true;
 295   }
 296 
 297   if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
 298     log_debug(aot, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: %zu",
 299                          p2i(obj), obj->size());
 300     debug_trace();
 301     return false;
 302   } else {
 303     count_allocation(obj->size());
 304     ArchiveHeapWriter::add_source_obj(obj);
 305     CachedOopInfo info = make_cached_oop_info(obj, referrer);
 306     archived_object_cache()->put_when_absent(obj, info);
 307     archived_object_cache()->maybe_grow();
 308     mark_native_pointers(obj);
 309 
 310     Klass* k = obj->klass();
 311     if (k->is_instance_klass()) {
 312       // Whenever we see a non-array Java object of type X, we mark X to be aot-initialized.
 313       // This ensures that during the production run, whenever Java code sees a cached object
 314       // of type X, we know that X is already initialized. (see TODO comment below ...)
 315 
 316       if (InstanceKlass::cast(k)->is_enum_subclass()
 317           // We can't rerun <clinit> of enum classes (see cdsEnumKlass.cpp) so
 318           // we must store them as AOT-initialized.
 319           || (subgraph_info == _dump_time_special_subgraph))
 320           // TODO: we do this only for the special subgraph for now. Extending this to
 321           // other subgraphs would require more refactoring of the core library (such as
  322           // moving some initialization logic into runtimeSetup()).
 323           //
 324           // For the other subgraphs, we have a weaker mechanism to ensure that
 325           // all classes in a subgraph are initialized before the subgraph is programmatically
 326           // returned from jdk.internal.misc.CDS::initializeFromArchive().

 389     OopHandle* handle = get(ptr);
 390     if (handle != nullptr) {
 391       handle->release(Universe::vm_global());
 392       remove(ptr);
 393     }
 394   }
 395 };
 396 
 397 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
 398   if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
 399     _scratch_objects_table->set_oop(src, dest);
 400   }
 401 }
 402 
 403 objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
 404   return (objArrayOop)_scratch_objects_table->get_oop(src);
 405 }
 406 
 407 void HeapShared::init_dumping() {
 408   _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
 409   _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
 410 }
 411 
 412 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
 413   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 414     BasicType bt = (BasicType)i;
 415     if (!is_reference_type(bt)) {
 416       oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
 417       _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
 418     }
 419   }
 420 }
 421 
 422 // Given java_mirror that represents a (primitive or reference) type T,
 423 // return the "scratch" version that represents the same type T.
  424 // Note that java_mirror will be returned if it's already a
 425 // scratch mirror.
 426 //
 427 // See java_lang_Class::create_scratch_mirror() for more info.
 428 oop HeapShared::scratch_java_mirror(oop java_mirror) {
 429   assert(java_lang_Class::is_instance(java_mirror), "must be");
 430 
 431   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 432     BasicType bt = (BasicType)i;
 433     if (!is_reference_type(bt)) {
 434       if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
 435         return java_mirror;
 436       }
 437     }
 438   }
 439 
 440   if (java_lang_Class::is_primitive(java_mirror)) {
 441     return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
 442   } else {
 443     return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
 444   }
 445 }
 446 
 447 oop HeapShared::scratch_java_mirror(BasicType t) {
 448   assert((uint)t < T_VOID+1, "range check");
 449   assert(!is_reference_type(t), "sanity");
 450   return _scratch_basic_type_mirrors[t].resolve();
 451 }
 452 
 453 oop HeapShared::scratch_java_mirror(Klass* k) {
 454   return _scratch_objects_table->get_oop(k);
 455 }
 456 
 457 void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
 458   _scratch_objects_table->set_oop(k, mirror);
 459 }
 460 
 461 void HeapShared::remove_scratch_objects(Klass* k) {
 462   // Klass is being deallocated. Java mirror can still be alive, and it should not
  463 // point to a dead klass. We need to break the link from the mirror to the Klass.
 464   // See how InstanceKlass::deallocate_contents does it for normal mirrors.
 465   oop mirror = _scratch_objects_table->get_oop(k);
 466   if (mirror != nullptr) {
 467     java_lang_Class::set_klass(mirror, nullptr);
 468   }
 469   _scratch_objects_table->remove_oop(k);
 470   if (k->is_instance_klass()) {
 471     _scratch_objects_table->remove(InstanceKlass::cast(k)->constants());
 472   }
 473 }
 474 
 475 //TODO: we eventually want a more direct test for these kinds of things.
 476 //For example the JVM could record some bit of context from the creation
 477 //of the klass, such as who called the hidden class factory.  Using
 478 //string compares on names is fragile and will break as soon as somebody
 479 //changes the names in the JDK code.  See discussion in JDK-8342481 for
 480 //related ideas about marking AOT-related classes.
 481 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
 482   return ik->is_hidden() &&
 483     (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
 484      ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
 485      ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
 486      ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
 487 }
 488 
 489 bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
 490   return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
 491 }
 492 

 612   assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
 613   StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
 614 }
 615 
 616 int HeapShared::archive_exception_instance(oop exception) {
 617   bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
 618   assert(success, "sanity");
 619   return append_root(exception);
 620 }
 621 
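// A java.lang.Class instance embeds raw Klass* pointers, and a
// ResolvedMethodName embeds a raw Method*. These native (non-oop) pointers
// are marked here so the heap writer can relocate them when the archived
// heap is mapped in a future run.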
 622 void HeapShared::mark_native_pointers(oop orig_obj) {
 623   if (java_lang_Class::is_instance(orig_obj)) {
 624     ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
 625     ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
 626   } else if (java_lang_invoke_ResolvedMethodName::is_instance(orig_obj)) {
 627     ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_invoke_ResolvedMethodName::vmtarget_offset());
 628   }
 629 }
 630 
 631 void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
 632   CachedOopInfo* info = archived_object_cache()->get(src_obj);
 633   assert(info != nullptr, "must be");
 634   has_oop_pointers = info->has_oop_pointers();
 635   has_native_pointers = info->has_native_pointers();
 636 }
 637 
 638 void HeapShared::set_has_native_pointers(oop src_obj) {
 639   CachedOopInfo* info = archived_object_cache()->get(src_obj);
 640   assert(info != nullptr, "must be");
 641   info->set_has_native_pointers();
 642 }
 643 
 644 // Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
 645 // should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
 646 void HeapShared::start_scanning_for_oops() {
 647   {
 648     NoSafepointVerifier nsv;
 649 
 650     // The special subgraph doesn't belong to any class. We use Object_klass() here just
 651     // for convenience.
 652     _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
 653 
 654     // Cache for recording where the archived objects are copied to
 655     create_archived_object_cache();
 656 
 657     if (UseCompressedOops || UseG1GC) {
 658       aot_log_info(aot)("Heap range = [" PTR_FORMAT " - "  PTR_FORMAT "]",
 659                     UseCompressedOops ? p2i(CompressedOops::begin()) :
 660                                         p2i((address)G1CollectedHeap::heap()->reserved().start()),
 661                     UseCompressedOops ? p2i(CompressedOops::end()) :
 662                                         p2i((address)G1CollectedHeap::heap()->reserved().end()));
 663     }
 664 
 665     archive_subgraphs();
 666   }
 667 
 668   init_seen_objects_table();
 669   Universe::archive_exception_instances();
 670 }
 671 
 672 void HeapShared::end_scanning_for_oops() {
 673   archive_strings();
 674   delete_seen_objects_table();
 675 }
 676 
 677 void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
 678   {
 679     NoSafepointVerifier nsv;
 680     CDSHeapVerifier::verify();
 681     check_special_subgraph_classes();
 682   }
 683 
 684   StringTable::write_shared_table();
 685   ArchiveHeapWriter::write(_pending_roots, heap_info);
 686 
 687   ArchiveBuilder::OtherROAllocMark mark;
 688   write_subgraph_info_table();
 689 }
 690 
 691 void HeapShared::scan_java_mirror(oop orig_mirror) {
 692   oop m = scratch_java_mirror(orig_mirror);
  693   if (m != nullptr) { // nullptr for classes loaded by custom class loaders
 694     copy_java_mirror_hashcode(orig_mirror, m);
 695     bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
 696     assert(success, "sanity");
 697   }
 698 }
 699 
 700 void HeapShared::scan_java_class(Klass* orig_k) {
 701   scan_java_mirror(orig_k->java_mirror());
 702 
 703   if (orig_k->is_instance_klass()) {
 704     InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
 705     orig_ik->constants()->prepare_resolved_references_for_archiving();

1071                           which, k->external_name());
1072       FlagSetting fs1(VerifyBeforeGC, true);
1073       FlagSetting fs2(VerifyDuringGC, true);
1074       FlagSetting fs3(VerifyAfterGC,  true);
1075       Universe::heap()->collect(GCCause::_java_lang_system_gc);
1076     }
1077   }
1078 }
1079 
1080 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
1081 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
1082 //
1083 // Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
1084 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
1085 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
1086 void HeapShared::resolve_classes(JavaThread* current) {
1087   assert(CDSConfig::is_using_archive(), "runtime only!");
1088   if (!ArchiveHeapLoader::is_in_use()) {
1089     return; // nothing to do
1090   }
1091   resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
1092   resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
1093 }
1094 
1095 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1096   for (int i = 0; fields[i].valid(); i++) {
1097     ArchivableStaticFieldInfo* info = &fields[i];
1098     TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1099     InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1100     assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
1101     resolve_classes_for_subgraph_of(current, k);
1102   }
1103 }
1104 
1105 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1106   JavaThread* THREAD = current;
1107   ExceptionMark em(THREAD);
1108   const ArchivedKlassSubGraphInfoRecord* record =
1109    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1110   if (HAS_PENDING_EXCEPTION) {

1429 };
1430 
1431 // Checks if an oop has any non-null oop fields
1432 class PointsToOopsChecker : public BasicOopIterateClosure {
1433   bool _result;
1434 
1435   template <class T> void check(T *p) {
1436     _result |= (HeapAccess<>::oop_load(p) != nullptr);
1437   }
1438 
1439 public:
1440   PointsToOopsChecker() : _result(false) {}
1441   void do_oop(narrowOop *p) { check(p); }
1442   void do_oop(      oop *p) { check(p); }
1443   bool result() { return _result; }
1444 };
1445 
1446 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
1447   PointsToOopsChecker points_to_oops_checker;
1448   obj->oop_iterate(&points_to_oops_checker);
1449   return CachedOopInfo(referrer, points_to_oops_checker.result());
1450 }
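
// The has_oop_pointers bit computed here is what get_pointer_info() reports
// later, letting callers cheaply distinguish leaf objects (no non-null oop
// fields) from objects that carry embedded references.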
1451 
1452 void HeapShared::init_box_classes(TRAPS) {
1453   if (ArchiveHeapLoader::is_in_use()) {
1454     vmClasses::Boolean_klass()->initialize(CHECK);
1455     vmClasses::Character_klass()->initialize(CHECK);
1456     vmClasses::Float_klass()->initialize(CHECK);
1457     vmClasses::Double_klass()->initialize(CHECK);
1458     vmClasses::Byte_klass()->initialize(CHECK);
1459     vmClasses::Short_klass()->initialize(CHECK);
1460     vmClasses::Integer_klass()->initialize(CHECK);
1461     vmClasses::Long_klass()->initialize(CHECK);
1462     vmClasses::Void_klass()->initialize(CHECK);
1463   }
1464 }
1465 
1466 // (1) If orig_obj has not been archived yet, archive it.
1467 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
1468 //     trace all objects that are reachable from it, and make sure these objects are archived.
1469 // (3) Record the klasses of all objects that are reachable from orig_obj (including those that
1470 //     were already archived when this function is called)
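// The traversal uses an explicit PendingOopStack rather than recursion, so
// an arbitrarily deep chain of references cannot overflow the native stack.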
1471 bool HeapShared::archive_reachable_objects_from(int level,
1472                                                 KlassSubGraphInfo* subgraph_info,
1473                                                 oop orig_obj) {
1474   assert(orig_obj != nullptr, "must be");
1475   PendingOopStack stack;
1476   stack.push(PendingOop(orig_obj, nullptr, level));
1477 
1478   while (stack.length() > 0) {
1479     PendingOop po = stack.pop();
1480     _object_being_archived = po;
1481     bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
1482     _object_being_archived = PendingOop();
1483 
1484     if (!status) {
1485       // Don't archive a subgraph root that's too big. For archived static fields, that's OK

1561 
1562   bool already_archived = has_been_archived(orig_obj);
1563   bool record_klasses_only = already_archived;
1564   if (!already_archived) {
1565     ++_num_new_archived_objs;
1566     if (!archive_object(orig_obj, referrer, subgraph_info)) {
1567       // Skip archiving the sub-graph referenced from the current entry field.
1568       ResourceMark rm;
1569       log_error(aot, heap)(
1570         "Cannot archive the sub-graph referenced from %s object ("
1571         PTR_FORMAT ") size %zu, skipped.",
1572         orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1573       if (level == 1) {
1574         // Don't archive a subgraph root that's too big. For archived static fields, that's OK
1575         // as the Java code will take care of initializing this field dynamically.
1576         return false;
1577       } else {
1578         // We don't know how to handle an object that has been archived, but some of its reachable
1579         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1580         // we have a real use case.
1581         MetaspaceShared::unrecoverable_writing_error();
1582       }
1583     }
1584   }
1585 
1586   Klass *orig_k = orig_obj->klass();
1587   subgraph_info->add_subgraph_object_klass(orig_k);
1588 
1589   {
1590     // Find all the oops that are referenced by orig_obj, push them onto the stack
1591     // so we can work on them next.
1592     ResourceMark rm;
1593     OopFieldPusher pusher(stack, level, record_klasses_only, subgraph_info, orig_obj);
1594     orig_obj->oop_iterate(&pusher);
1595   }
1596 
1597   if (CDSConfig::is_initing_classes_at_dump_time()) {
1598     // The enum klasses are archived with aot-initialized mirror.
1599     // See AOTClassInitializer::can_archive_initialized_mirror().
1600   } else {
1601     if (CDSEnumKlass::is_enum_obj(orig_obj)) {

2015 
2016 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2017                                           bool is_full_module_graph) {
2018   _num_total_subgraph_recordings = 0;
2019   _num_total_walked_objs = 0;
2020   _num_total_archived_objs = 0;
2021   _num_total_recorded_klasses = 0;
2022   _num_total_verifications = 0;
2023 
2024   // For each class X that has one or more archived fields:
2025   // [1] Dump the subgraph of each archived field
2026   // [2] Create a list of all the classes of the objects that can be reached
2027   //     by any of these static fields.
2028   //     At runtime, these classes are initialized before X's archived fields
2029   //     are restored by HeapShared::initialize_from_archived_subgraph().
2030   for (int i = 0; fields[i].valid(); ) {
2031     ArchivableStaticFieldInfo* info = &fields[i];
2032     const char* klass_name = info->klass_name;
2033     start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2034 
2035     // If you have specified consecutive fields of the same klass in
2036     // fields[], these will be archived in the same
2037     // {start_recording_subgraph ... done_recording_subgraph} pass to
2038     // save time.
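    // For example (hypothetical names): consecutive entries {"p/X", "f1"},
    // {"p/X", "f2"}, {"p/Y", "g"} would be handled as two recording passes:
    // one covering both fields of X, then a second one covering Y's field.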
2039     for (; fields[i].valid(); i++) {
2040       ArchivableStaticFieldInfo* f = &fields[i];
2041       if (f->klass_name != klass_name) {
2042         break;
2043       }
2044 
2045       archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2046                                                   f->offset, f->field_name);
2047     }
2048     done_recording_subgraph(info->klass, klass_name);
2049   }
2050 
2051   log_info(aot, heap)("Archived subgraph records = %d",
2052                       _num_total_subgraph_recordings);
2053   log_info(aot, heap)("  Walked %d objects", _num_total_walked_objs);
2054   log_info(aot, heap)("  Archived %d objects", _num_total_archived_objs);
2055   log_info(aot, heap)("  Recorded %d klasses", _num_total_recorded_klasses);
2056 
2057 #ifndef PRODUCT
2058   for (int i = 0; fields[i].valid(); i++) {
2059     ArchivableStaticFieldInfo* f = &fields[i];
2060     verify_subgraph_from_static_field(f->klass, f->offset);
2061   }
2062   log_info(aot, heap)("  Verified %d references", _num_total_verifications);
2063 #endif
2064 }
2065 
2066 // Keep track of the contents of the archived interned string table. This table
2067 // is used only by CDSHeapVerifier.
2068 void HeapShared::add_to_dumped_interned_strings(oop string) {
2069   assert_at_safepoint(); // DumpedInternedStrings uses raw oops
2070   assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
2071   bool created;
2072   _dumped_interned_strings->put_if_absent(string, true, &created);
2073   if (created) {
2074     // Prevent string deduplication from changing the value field to
2075     // something not in the archive.
2076     java_lang_String::set_deduplication_forbidden(string);
2077     _dumped_interned_strings->maybe_grow();
2078   }
2079 }
2080 
2081 bool HeapShared::is_dumped_interned_string(oop o) {
2082   return _dumped_interned_strings->get(o) != nullptr;
2083 }
2084 
2085 void HeapShared::debug_trace() {
2086   ResourceMark rm;
2087   oop referrer = _object_being_archived.referrer();
2088   if (referrer != nullptr) {
2089     LogStream ls(Log(aot, heap)::error());
2090     ls.print_cr("Reference trace");
2091     CDSHeapVerifier::trace_to_root(&ls, referrer);
2092   }
2093 }
2094 
2095 #ifndef PRODUCT
2096 // At dump-time, find the location of all the non-null oop pointers in an archived heap
2097 // region. This way we can quickly relocate all the pointers without using
2098 // BasicOopIterateClosure at runtime.
2099 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
2100   void* _start;
2101   BitMap *_oopmap;
2102   int _num_total_oops;
2103   int _num_null_oops;
2104  public:

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/aotArtifactFinder.hpp"
  26 #include "cds/aotCacheAccess.hpp"
  27 #include "cds/aotClassInitializer.hpp"
  28 #include "cds/aotClassLocation.hpp"
  29 #include "cds/aotLogging.hpp"
  30 #include "cds/aotReferenceObjSupport.hpp"
  31 #include "cds/archiveBuilder.hpp"
  32 #include "cds/archiveHeapLoader.hpp"
  33 #include "cds/archiveHeapWriter.hpp"
  34 #include "cds/archiveUtils.hpp"
  35 #include "cds/cdsConfig.hpp"
  36 #include "cds/cdsEnumKlass.hpp"
  37 #include "cds/cdsHeapVerifier.hpp"
  38 #include "cds/heapShared.hpp"
  39 #include "cds/metaspaceShared.hpp"
  40 #include "classfile/classLoaderData.hpp"
  41 #include "classfile/classLoaderExt.hpp"
  42 #include "classfile/javaClasses.inline.hpp"
  43 #include "classfile/modules.hpp"
  44 #include "classfile/stringTable.hpp"
  45 #include "classfile/symbolTable.hpp"
  46 #include "classfile/systemDictionary.hpp"
  47 #include "classfile/systemDictionaryShared.hpp"
  48 #include "classfile/vmClasses.hpp"
  49 #include "classfile/vmSymbols.hpp"
  50 #include "gc/shared/collectedHeap.hpp"
  51 #include "gc/shared/gcLocker.hpp"
  52 #include "gc/shared/gcVMOperations.hpp"
  53 #include "logging/log.hpp"
  54 #include "logging/logStream.hpp"
  55 #include "memory/iterator.inline.hpp"
  56 #include "memory/resourceArea.hpp"
  57 #include "memory/universe.hpp"
  58 #include "oops/compressedOops.inline.hpp"
  59 #include "oops/fieldStreams.inline.hpp"
  60 #include "oops/objArrayOop.inline.hpp"
  61 #include "oops/oop.inline.hpp"
  62 #include "oops/oopHandle.inline.hpp"
  63 #include "oops/typeArrayOop.inline.hpp"
  64 #include "prims/jvmtiExport.hpp"
  65 #include "runtime/arguments.hpp"
  66 #include "runtime/fieldDescriptor.inline.hpp"
  67 #include "runtime/init.hpp"
  68 #include "runtime/javaCalls.hpp"
  69 #include "runtime/mutexLocker.hpp"
  70 #include "runtime/safepointVerifiers.hpp"
  71 #include "utilities/bitMap.inline.hpp"
  72 #include "utilities/copy.hpp"
  73 #if INCLUDE_G1GC
  74 #include "gc/g1/g1CollectedHeap.hpp"
  75 #endif
  76 
  77 #if INCLUDE_CDS_JAVA_HEAP
  78 
  79 struct ArchivableStaticFieldInfo {
  80   const char* klass_name;
  81   const char* field_name;
  82   InstanceKlass* klass;
  83   int offset;
  84   BasicType type;
  85 
  86   ArchivableStaticFieldInfo(const char* k, const char* f)
  87   : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}
  88 
  89   bool valid() {
  90     return klass_name != nullptr;
  91   }
  92 };
  93 
  94 class HeapShared::ContextMark : public StackObj {
  95   ResourceMark rm;
  96 public:
  97   ContextMark(const char* c) : rm{} {
  98     _context->push(c);
  99   }
 100   ~ContextMark() {
 101     _context->pop();
 102   }
 103 };
 104 
 105 DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
 106 
 107 size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
 108 size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
 109 size_t HeapShared::_total_obj_count;
 110 size_t HeapShared::_total_obj_size;
 111 
 112 #ifndef PRODUCT
 113 #define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
 114 static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
 115 static const char* _test_class_name = nullptr;
 116 static Klass* _test_class = nullptr;
 117 static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
 118 #endif
 119 
 120 
 121 //
 122 // If you add new entries to the following tables, you should know what you're doing!
 123 //
 124 
 125 static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
 126   {"java/lang/Integer$IntegerCache",              "archivedCache"},
 127   {"java/lang/Long$LongCache",                    "archivedCache"},
 128   {"java/lang/Byte$ByteCache",                    "archivedCache"},
 129   {"java/lang/Short$ShortCache",                  "archivedCache"},
 130   {"java/lang/Character$CharacterCache",          "archivedCache"},
 131   {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
 132   {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
 133   {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
 134   {"java/util/ImmutableCollections",              "archivedObjects"},
 135   {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
 136   {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
 137   {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
 138   {"java/lang/reflect/Proxy$ProxyBuilder",        "archivedData"},    // FIXME -- requires AOTClassLinking
 139 
 140 #ifndef PRODUCT
 141   {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
 142 #endif
 143   {nullptr, nullptr},
 144 };
 145 
 146 // full module graph
 147 static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
 148   {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
 149   {ARCHIVED_BOOT_LAYER_CLASS,                     ARCHIVED_BOOT_LAYER_FIELD},
 150   {"java/lang/Module$ArchivedData",               "archivedData"},
 151   {nullptr, nullptr},
 152 };
 153 
 154 KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
 155 ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
 156 GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
 157 GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
 158 GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
 159 int HeapShared::_root_segment_max_size_elems;
 160 OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
 161 MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
 162 
 163 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
 164   for (int i = 0; fields[i].valid(); i++) {
 165     if (fields[i].klass == ik) {
 166       return true;
 167     }
 168   }
 169   return false;
 170 }
 171 
 172 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
 173   return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
 174          is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
 175 }
 176 
 177 oop HeapShared::CachedOopInfo::orig_referrer() const {
 178   return _orig_referrer.resolve();
 179 }
 180 
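// The cache keys are hashed by raw object address (see oop_hash() below), so
// any GC that moves objects silently invalidates the bucket placement. The
// table must therefore be rebuilt before it is probed again after a GC.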
 181 void HeapShared::rehash_archived_object_cache() {
 182   if (!CDSConfig::is_dumping_heap()) {
 183     return;
 184   }
 185   assert(SafepointSynchronize::is_at_safepoint() ||
 186          JavaThread::current()->is_in_no_safepoint_scope(), "sanity");
 187 
 188   ArchivedObjectCache* new_cache =
 189       new (mtClass)ArchivedObjectCache(archived_object_cache()->table_size(), MAX_TABLE_SIZE);
 190 
 191   archived_object_cache()->iterate_all([&](OopHandle o, CachedOopInfo& info) {
 192     new_cache->put_when_absent(o, info);
 193   });
 194 
 195   delete _archived_object_cache;
 196   _archived_object_cache = new_cache;
 197 }
 198 
 199 unsigned HeapShared::oop_hash(oop const& p) {
 200   assert(SafepointSynchronize::is_at_safepoint() ||
 201          JavaThread::current()->is_in_no_safepoint_scope(), "sanity");
 202   // Do not call p->identity_hash() as that will update the
 203   // object header.
 204   return primitive_hash(cast_from_oop<intptr_t>(p));
 205 }
 206 
 207 unsigned int HeapShared::oop_handle_hash_raw(const OopHandle& oh) {
 208   return oop_hash(oh.resolve());
 209 }
 210 
 211 unsigned int HeapShared::oop_handle_hash(const OopHandle& oh) {
 212   oop o = oh.resolve();
 213   if (o == nullptr) {
 214     return 0;
 215   } else {
 216     return o->identity_hash();
 217   }
 218 }
 219 
 220 bool HeapShared::oop_handle_equals(const OopHandle& a, const OopHandle& b) {
 221   return a.resolve() == b.resolve();
 222 }
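
// Of the three hash functions above, only oop_handle_hash() is stable across
// GC: it uses the object's identity hash, which travels with the object when
// it is moved. oop_hash() and oop_handle_hash_raw() hash the raw address, so
// they are valid only while a safepoint or no-safepoint scope guarantees that
// objects cannot move (hence the asserts in oop_hash()).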
 223 
 224 static void reset_states(oop obj, TRAPS) {
 225   Handle h_obj(THREAD, obj);
 226   InstanceKlass* klass = InstanceKlass::cast(obj->klass());
 227   TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
 228   Symbol* method_sig = vmSymbols::void_method_signature();
 229 
 230   while (klass != nullptr) {
 231     Method* method = klass->find_method(method_name, method_sig);
 232     if (method != nullptr) {
 233       assert(method->is_private(), "must be");
 234       if (log_is_enabled(Debug, aot)) {
 235         ResourceMark rm(THREAD);
 236         log_debug(aot)("  calling %s", method->name_and_sig_as_C_string());
 237       }
 238       JavaValue result(T_VOID);
 239       JavaCalls::call_special(&result, h_obj, klass,
 240                               method_name, method_sig, CHECK);
 241     }
 242     klass = klass->java_super();
 243   }

 255   // to keep track of resources, etc, loaded by the null class loader.
 256   //
 257   // Note, this object is non-null, and is not the same as
 258   // ClassLoaderData::the_null_class_loader_data()->class_loader(),
 259   // which is null.
 260   log_debug(aot)("Resetting boot loader");
 261   JavaValue result(T_OBJECT);
 262   JavaCalls::call_static(&result,
 263                          vmClasses::jdk_internal_loader_ClassLoaders_klass(),
 264                          vmSymbols::bootLoader_name(),
 265                          vmSymbols::void_BuiltinClassLoader_signature(),
 266                          CHECK);
 267   Handle boot_loader(THREAD, result.get_oop());
 268   reset_states(boot_loader(), CHECK);
 269 }
 270 
 271 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
 272 
 273 bool HeapShared::has_been_archived(oop obj) {
 274   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 275   OopHandle oh(&obj);
 276   return archived_object_cache()->get(oh) != nullptr;
 277 }
 278 
 279 int HeapShared::append_root(oop obj) {
 280   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 281   if (obj != nullptr) {
 282     assert(has_been_archived(obj), "must be");
 283   }
 284   // No GC should happen since we aren't scanning _pending_roots.
 285   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 286 
 287   OopHandle oh(Universe::vm_global(), obj);
 288   return _pending_roots->append(oh);
 289 }
 290 
 291 objArrayOop HeapShared::root_segment(int segment_idx) {
 292   if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
 293     assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 294   } else {
 295     assert(CDSConfig::is_using_archive(), "must be");
 296   }
 297 
 298   objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
 299   assert(segment != nullptr, "should have been initialized");
 300   return segment;
 301 }
 302 
 303 class OrigToScratchObjectTable: public ResourceHashtable<OopHandle, OopHandle,
 304     36137, // prime number
 305     AnyObj::C_HEAP,
 306     mtClassShared,
 307     HeapShared::oop_handle_hash,
 308     HeapShared::oop_handle_equals> {};
 309 
 310 static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;
 311 
 312 void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
 313   MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
 314   if (_orig_to_scratch_object_table == nullptr) {
 315     _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
 316   }
 317 
 318   OopHandle orig_h(Universe::vm_global(), orig_obj);
 319   OopHandle scratch_h(Universe::vm_global(), scratch_obj);
 320   _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
 321 }
 322 
 323 oop HeapShared::orig_to_scratch_object(oop orig_obj) {
 324   MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
 325   if (_orig_to_scratch_object_table != nullptr) {
 326     OopHandle orig(&orig_obj);
 327     OopHandle* v = _orig_to_scratch_object_table->get(orig);
 328     if (v != nullptr) {
 329       return v->resolve();
 330     }
 331   }
 332   return nullptr;
 333 }
 334 
 335 // Permanent oops are used to support AOT-compiled methods, which may have in-line references
 336 // to Strings and MH oops.
 337 //
 338 // At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
  339 // and are accessed via AOTCacheAccess::get_archived_object(int).
 340 struct PermanentOopInfo {
 341   int _index;       // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
 342   int _heap_offset; // Offset of the object from the bottom of the archived heap.
 343   PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
 344 };
 345 
 346 class PermanentOopTable: public ResourceHashtable<OopHandle, PermanentOopInfo,
 347     36137, // prime number
 348     AnyObj::C_HEAP,
 349     mtClassShared,
 350     HeapShared::oop_handle_hash,
 351     HeapShared::oop_handle_equals> {};
 352 
 353 static int _dumptime_permanent_oop_count = 0;
 354 static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
 355 static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;
 356 
 357 // ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
 358 // so we can remember their offset (from the bottom of the archived heap).
 359 void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
 360   assert_at_safepoint();
 361   if (_dumptime_permanent_oop_table == nullptr) {
 362     _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
 363   }
 364 
 365   PermanentOopInfo info(-1, offset);
 366   OopHandle oh(Universe::vm_global(), obj);
 367   _dumptime_permanent_oop_table->put_when_absent(oh, info);
 368 }
 369 
 370 // A permanent index is assigned to an archived object ONLY when
 371 // the AOT compiler calls this function.
 372 int HeapShared::get_archived_object_permanent_index(oop obj) {
 373   MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
 374 
 375   if (!CDSConfig::is_dumping_heap()) {
 376     return -1; // Called by the Leyden old workflow
 377   }
 378   if (_dumptime_permanent_oop_table == nullptr) {
 379     return -1;
 380   }
 381 
 382   if (_orig_to_scratch_object_table != nullptr) {
 383     OopHandle orig(&obj);
 384     OopHandle* v = _orig_to_scratch_object_table->get(orig);
 385     if (v != nullptr) {
 386       obj = v->resolve();
 387     }
 388   }
 389 
 390   OopHandle tmp(&obj);
 391   PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
 392   if (info == nullptr) {
 393     return -1;
 394   } else {
 395     if (info->_index < 0) {
 396       info->_index = _dumptime_permanent_oop_count++;
 397     }
 398     return info->_index;
 399   }
 400 }
 401 
 402 oop HeapShared::get_archived_object(int permanent_index) {
 403   assert(permanent_index >= 0, "sanity");
 404   assert(ArchiveHeapLoader::is_in_use(), "sanity");
 405   assert(_runtime_permanent_oops != nullptr, "sanity");
 406 
 407   return _runtime_permanent_oops->at(permanent_index).resolve();
 408 }
 409 
 410 // Remember all archived heap objects that have a permanent index.
 411 //   table[i] = offset of oop whose permanent index is i.
 412 void CachedCodeDirectoryInternal::dumptime_init_internal() {
 413   const int count = _dumptime_permanent_oop_count;
 414   if (count == 0) {
 415     // Avoid confusing CDS code with zero-sized tables, just return.
 416     log_info(cds)("No permanent oops");
 417     _permanent_oop_count = count;
 418     _permanent_oop_offsets = nullptr;
 419     return;
 420   }
 421 
 422   int* table = (int*)AOTCacheAccess::allocate_aot_code_region(count * sizeof(int));
 423   for (int i = 0; i < count; i++) {
  424     table[i] = -1;
 425   }
 426   _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
 427     int index = info._index;
 428     if (index >= 0) {
 429       assert(index < count, "sanity");
 430       table[index] = info._heap_offset;
 431     }
 432     return true; // continue
 433   });
 434 
 435   for (int i = 0; i < count; i++) {
 436     assert(table[i] >= 0, "must be");
 437   }
 438 
 439   log_info(cds)("Dumped %d permanent oops", count);
 440 
 441   _permanent_oop_count = count;
 442   AOTCacheAccess::set_pointer(&_permanent_oop_offsets, table);
 443 }
 444 
 445 // This is called during the bootstrap of the production run, before any GC can happen.
  446 // Record each permanent oop in an OopHandle for GC safety.
 447 void CachedCodeDirectoryInternal::runtime_init_internal() {
 448   int count = _permanent_oop_count;
 449   int* table = _permanent_oop_offsets;
 450   _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
 451   for (int i = 0; i < count; i++) {
 452     oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
 453     OopHandle oh(Universe::vm_global(), obj);
 454     _runtime_permanent_oops->append(oh);
 455   }
 456 };
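
// Example, assuming three oops were assigned permanent indexes at dump time,
// with heap offsets 0x40, 0x180 and 0x2c0: dumptime_init_internal() stores
// table = {0x40, 0x180, 0x2c0} in the AOT code region, and at runtime
// HeapShared::get_archived_object(1) resolves the OopHandle created above for
// the object materialized at offset 0x180.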
 457 
 458 void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
 459   assert(_root_segment_max_size_elems > 0, "sanity");
 460 
 461   // Try to avoid divisions for the common case.
 462   if (idx < _root_segment_max_size_elems) {
 463     seg_idx = 0;
 464     int_idx = idx;
 465   } else {
 466     seg_idx = idx / _root_segment_max_size_elems;
 467     int_idx = idx % _root_segment_max_size_elems;
 468   }
 469 
 470   assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
 471          "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
 472 }
 473 
  474 // Returns the root object at the given index from the archived root segments
 475 oop HeapShared::get_root(int index, bool clear) {
 476   assert(index >= 0, "sanity");
 477   assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");

 499   }
 500 }
 501 
 502 bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info) {
 503   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 504 
 505   assert(!obj->is_stackChunk(), "do not archive stack chunks");
 506   if (has_been_archived(obj)) {
 507     return true;
 508   }
 509 
 510   if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
 511     log_debug(aot, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: %zu",
 512                          p2i(obj), obj->size());
 513     debug_trace();
 514     return false;
 515   } else {
 516     count_allocation(obj->size());
 517     ArchiveHeapWriter::add_source_obj(obj);
 518     CachedOopInfo info = make_cached_oop_info(obj, referrer);
 519 
 520     OopHandle oh(Universe::vm_global(), obj);
 521     archived_object_cache()->put_when_absent(oh, info);
 522     archived_object_cache()->maybe_grow();
 523     mark_native_pointers(obj);
 524 
 525     Klass* k = obj->klass();
 526     if (k->is_instance_klass()) {
 527       // Whenever we see a non-array Java object of type X, we mark X to be aot-initialized.
 528       // This ensures that during the production run, whenever Java code sees a cached object
 529       // of type X, we know that X is already initialized. (see TODO comment below ...)
 530 
 531       if (InstanceKlass::cast(k)->is_enum_subclass()
 532           // We can't rerun <clinit> of enum classes (see cdsEnumKlass.cpp) so
 533           // we must store them as AOT-initialized.
 534           || (subgraph_info == _dump_time_special_subgraph))
 535           // TODO: we do this only for the special subgraph for now. Extending this to
 536           // other subgraphs would require more refactoring of the core library (such as
  537           // moving some initialization logic into runtimeSetup()).
 538           //
 539           // For the other subgraphs, we have a weaker mechanism to ensure that
 540           // all classes in a subgraph are initialized before the subgraph is programmatically
 541           // returned from jdk.internal.misc.CDS::initializeFromArchive().

 604     OopHandle* handle = get(ptr);
 605     if (handle != nullptr) {
 606       handle->release(Universe::vm_global());
 607       remove(ptr);
 608     }
 609   }
 610 };
 611 
 612 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
 613   if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
 614     _scratch_objects_table->set_oop(src, dest);
 615   }
 616 }
 617 
 618 objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
 619   return (objArrayOop)_scratch_objects_table->get_oop(src);
 620 }
 621 
 622 void HeapShared::init_dumping() {
 623   _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
 624   _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
 625 }
 626 
 627 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
 628   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 629     BasicType bt = (BasicType)i;
 630     if (!is_reference_type(bt)) {
 631       oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
 632       _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
 633       track_scratch_object(Universe::java_mirror(bt), m);
 634     }
 635   }
 636 }
 637 
 638 // Given java_mirror that represents a (primitive or reference) type T,
 639 // return the "scratch" version that represents the same type T.
  640 // Note that java_mirror will be returned if it's already a
 641 // scratch mirror.
 642 //
 643 // See java_lang_Class::create_scratch_mirror() for more info.
 644 oop HeapShared::scratch_java_mirror(oop java_mirror) {
 645   assert(java_lang_Class::is_instance(java_mirror), "must be");
 646 
 647   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 648     BasicType bt = (BasicType)i;
 649     if (!is_reference_type(bt)) {
 650       if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
 651         return java_mirror;
 652       }
 653     }
 654   }
 655 
 656   if (java_lang_Class::is_primitive(java_mirror)) {
 657     return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
 658   } else {
 659     return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
 660   }
 661 }
 662 
 663 oop HeapShared::scratch_java_mirror(BasicType t) {
 664   assert((uint)t < T_VOID+1, "range check");
 665   assert(!is_reference_type(t), "sanity");
 666   return _scratch_basic_type_mirrors[t].resolve();
 667 }
 668 
 669 oop HeapShared::scratch_java_mirror(Klass* k) {
 670   return _scratch_objects_table->get_oop(k);
 671 }
 672 
 673 void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
 674   track_scratch_object(k->java_mirror(), mirror);
 675   _scratch_objects_table->set_oop(k, mirror);
 676 }
 677 
 678 void HeapShared::remove_scratch_objects(Klass* k) {
 679   // Klass is being deallocated. Java mirror can still be alive, and it should not
  680 // point to a dead klass. We need to break the link from the mirror to the Klass.
 681   // See how InstanceKlass::deallocate_contents does it for normal mirrors.
 682   oop mirror = _scratch_objects_table->get_oop(k);
 683   if (mirror != nullptr) {
 684     java_lang_Class::set_klass(mirror, nullptr);
 685   }
 686   _scratch_objects_table->remove_oop(k);
 687   if (k->is_instance_klass()) {
 688     _scratch_objects_table->remove(InstanceKlass::cast(k)->constants());
 689   }
 690   if (mirror != nullptr) {
 691     OopHandle tmp(&mirror);
 692     OopHandle* v = _orig_to_scratch_object_table->get(tmp);
 693     if (v != nullptr) {
 694       oop scratch_mirror = v->resolve();
 695       java_lang_Class::set_klass(scratch_mirror, nullptr);
 696       _orig_to_scratch_object_table->remove(tmp);
 697     }
 698   }
 699 }
 700 
 701 // TODO: we eventually want a more direct test for these kinds of things.
 702 // For example, the JVM could record some bit of context from the creation
 703 // of the klass, such as who called the hidden class factory. Using
 704 // string compares on names is fragile and will break as soon as somebody
 705 // changes the names in the JDK code. See discussion in JDK-8342481 for
 706 // related ideas about marking AOT-related classes.
 707 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
 708   return ik->is_hidden() &&
 709     (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
 710      ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
 711      ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
 712      ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
 713 }
 714 
 715 bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
 716   return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
 717 }
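
     // Hedged examples of hidden-class names these two predicates are meant to
     // match; the suffixes after '+' are illustrative, not real addresses:
     //
     //   java/lang/invoke/LambdaForm$MH+0x00001234  -> is_lambda_form_klass()
     //   com/example/Foo$$Lambda+0x00005678         -> is_lambda_proxy_klass()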
 718 

 838   assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
 839   StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
 840 }
 841 
 842 int HeapShared::archive_exception_instance(oop exception) {
 843   bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
 844   assert(success, "sanity");
 845   return append_root(exception);
 846 }
 847 
 848 void HeapShared::mark_native_pointers(oop orig_obj) {
 849   if (java_lang_Class::is_instance(orig_obj)) {
 850     ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
 851     ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
 852   } else if (java_lang_invoke_ResolvedMethodName::is_instance(orig_obj)) {
 853     ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_invoke_ResolvedMethodName::vmtarget_offset());
 854   }
 855 }
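
     // Descriptive note: the two java.lang.Class offsets above locate injected
     // fields holding Klass* (Metaspace) pointers, and vmtarget holds a Method*.
     // These are "native" pointers rather than oops, which is why the heap
     // writer must record them for separate relocation.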
 856 
 857 void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
 858   OopHandle oh(&src_obj);
 859   CachedOopInfo* info = archived_object_cache()->get(oh);
 860   assert(info != nullptr, "must be");
 861   has_oop_pointers = info->has_oop_pointers();
 862   has_native_pointers = info->has_native_pointers();
 863 }
 864 
 865 void HeapShared::set_has_native_pointers(oop src_obj) {
 866   OopHandle oh(&src_obj);
 867   CachedOopInfo* info = archived_object_cache()->get(oh);
 868   assert(info != nullptr, "must be");
 869   info->set_has_native_pointers();
 870 }
 871 
 872 // Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
 873 // should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
 874 void HeapShared::start_scanning_for_oops() {
 875   {
 876     NoSafepointVerifier nsv;
 877 
 878     // The special subgraph doesn't belong to any class. We use Object_klass() here just
 879     // for convenience.
 880     _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
 881     _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);
 882 
 883     // Cache for recording where the archived objects are copied to
 884     create_archived_object_cache();
 885 
 886     if (UseCompressedOops || UseG1GC) {
 887       aot_log_info(aot)("Heap range = [" PTR_FORMAT " - "  PTR_FORMAT "]",
 888                     UseCompressedOops ? p2i(CompressedOops::begin()) :
 889                                         p2i((address)G1CollectedHeap::heap()->reserved().start()),
 890                     UseCompressedOops ? p2i(CompressedOops::end()) :
 891                                         p2i((address)G1CollectedHeap::heap()->reserved().end()));
 892     }
 893 
 894     archive_subgraphs();
 895   }
 896 
 897   init_seen_objects_table();
 898   Universe::archive_exception_instances();
 899 }
 900 
 901 void HeapShared::end_scanning_for_oops() {
 902   archive_strings();
 903   delete_seen_objects_table();
 904 }
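
     // A hedged sketch of the calling protocol described above; the middle step
     // is driven by AOTArtifactFinder and is paraphrased here:
     //
     //   HeapShared::start_scanning_for_oops();
     //   // ... AOTArtifactFinder visits each class to be cached, which leads
     //   // to calls such as scan_java_class() below ...
     //   HeapShared::end_scanning_for_oops();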
 905 
 906 void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
 907   {
 908     NoSafepointVerifier nsv;
 909     if (!SkipArchiveHeapVerification) {
 910       CDSHeapVerifier::verify();
 911     }
 912     check_special_subgraph_classes();
 913   }
 914 
 915   StringTable::write_shared_table();
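       // Resolve the pending roots from OopHandles into raw oops for the writer.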
 916   GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
 917   for (int i = 0; i < _pending_roots->length(); i++) {
 918     roots->append(_pending_roots->at(i).resolve());
 919   }
 920   ArchiveHeapWriter::write(roots, heap_info);
 921   delete roots;
 922 
 923   ArchiveBuilder::OtherROAllocMark mark;
 924   write_subgraph_info_table();
 925 }
 926 
 927 void HeapShared::scan_java_mirror(oop orig_mirror) {
 928   oop m = scratch_java_mirror(orig_mirror);
 929   if (m != nullptr) { // nullptr for classes loaded by custom class loaders
 930     copy_java_mirror_hashcode(orig_mirror, m);
 931     bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
 932     assert(success, "sanity");
 933   }
 934 }
 935 
 936 void HeapShared::scan_java_class(Klass* orig_k) {
 937   scan_java_mirror(orig_k->java_mirror());
 938 
 939   if (orig_k->is_instance_klass()) {
 940     InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
 941     orig_ik->constants()->prepare_resolved_references_for_archiving();

1307                           which, k->external_name());
1308       FlagSetting fs1(VerifyBeforeGC, true);
1309       FlagSetting fs2(VerifyDuringGC, true);
1310       FlagSetting fs3(VerifyAfterGC,  true);
1311       Universe::heap()->collect(GCCause::_java_lang_system_gc);
1312     }
1313   }
1314 }
1315 
1316 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
1317 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
1318 //
1319 // Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
1320 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
1321 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
1322 void HeapShared::resolve_classes(JavaThread* current) {
1323   assert(CDSConfig::is_using_archive(), "runtime only!");
1324   if (!ArchiveHeapLoader::is_in_use()) {
1325     return; // nothing to do
1326   }
1327 
1328   if (!CDSConfig::is_using_aot_linked_classes()) {
1329     assert(_run_time_special_subgraph != nullptr, "must be");
1330     Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
1331     if (klasses != nullptr) {
1332       for (int i = 0; i < klasses->length(); i++) {
1333         Klass* k = klasses->at(i);
1334         ExceptionMark em(current); // no exception can happen here
1335         resolve_or_init(k, /*do_init*/false, current);
1336       }
1337     }
1338   }
1339 
1340   resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
1341   resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
1342 }
1343 
1344 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1345   for (int i = 0; fields[i].valid(); i++) {
1346     ArchivableStaticFieldInfo* info = &fields[i];
1347     TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1348     InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1349     assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
1350     resolve_classes_for_subgraph_of(current, k);
1351   }
1352 }
1353 
1354 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1355   JavaThread* THREAD = current;
1356   ExceptionMark em(THREAD);
1357   const ArchivedKlassSubGraphInfoRecord* record =
1358    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1359   if (HAS_PENDING_EXCEPTION) {

1678 };
1679 
1680 // Checks if an oop has any non-null oop fields
1681 class PointsToOopsChecker : public BasicOopIterateClosure {
1682   bool _result;
1683 
1684   template <class T> void check(T *p) {
1685     _result |= (HeapAccess<>::oop_load(p) != nullptr);
1686   }
1687 
1688 public:
1689   PointsToOopsChecker() : _result(false) {}
1690   void do_oop(narrowOop *p) { check(p); }
1691   void do_oop(      oop *p) { check(p); }
1692   bool result() { return _result; }
1693 };
1694 
1695 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
1696   PointsToOopsChecker points_to_oops_checker;
1697   obj->oop_iterate(&points_to_oops_checker);
1698   return CachedOopInfo(OopHandle(Universe::vm_global(), referrer), points_to_oops_checker.result());
1699 }
1700 
1701 void HeapShared::init_box_classes(TRAPS) {
1702   if (ArchiveHeapLoader::is_in_use()) {
1703     vmClasses::Boolean_klass()->initialize(CHECK);
1704     vmClasses::Character_klass()->initialize(CHECK);
1705     vmClasses::Float_klass()->initialize(CHECK);
1706     vmClasses::Double_klass()->initialize(CHECK);
1707     vmClasses::Byte_klass()->initialize(CHECK);
1708     vmClasses::Short_klass()->initialize(CHECK);
1709     vmClasses::Integer_klass()->initialize(CHECK);
1710     vmClasses::Long_klass()->initialize(CHECK);
1711     vmClasses::Void_klass()->initialize(CHECK);
1712   }
1713 }
1714 
1715 void HeapShared::exit_on_error() {
1716   if (_context != nullptr) {
1717     ResourceMark rm;
1718     LogStream ls(Log(cds, heap)::error());
1719     ls.print_cr("Context");
1720     for (int i = 0; i < _context->length(); i++) {
1721       const char* s = _context->at(i);
1722       ls.print_cr("- %s", s);
1723     }
1724   }
1725   debug_trace();
1726   MetaspaceShared::unrecoverable_writing_error();
1727 }
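
     // A hypothetical example of the "Context" output above, assuming the error
     // occurred while archiving an illustrative subgraph entry field
     // java/lang/Integer$IntegerCache::archivedCache; entries are pushed by the
     // ContextMarks in archive_object_subgraphs():
     //
     //   Context
     //   - java/lang/Integer$IntegerCache
     //   - archivedCache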
1728 
1729 // (1) If orig_obj has not been archived yet, archive it.
1730 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
1731 //     trace all objects that are reachable from it, and make sure these objects are archived.
1732 // (3) Record the klasses of all objects that are reachable from orig_obj (including those that
1733 //     were already archived when this function is called).
1734 bool HeapShared::archive_reachable_objects_from(int level,
1735                                                 KlassSubGraphInfo* subgraph_info,
1736                                                 oop orig_obj) {
1737   assert(orig_obj != nullptr, "must be");
1738   PendingOopStack stack;
1739   stack.push(PendingOop(orig_obj, nullptr, level));
1740 
1741   while (stack.length() > 0) {
1742     PendingOop po = stack.pop();
1743     _object_being_archived = po;
1744     bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
1745     _object_being_archived = PendingOop();
1746 
1747     if (!status) {
1748       // Don't archive a subgraph root that's too big. For archived static fields, that's OK

1824 
1825   bool already_archived = has_been_archived(orig_obj);
1826   bool record_klasses_only = already_archived;
1827   if (!already_archived) {
1828     ++_num_new_archived_objs;
1829     if (!archive_object(orig_obj, referrer, subgraph_info)) {
1830       // Skip archiving the sub-graph referenced from the current entry field.
1831       ResourceMark rm;
1832       log_error(aot, heap)(
1833         "Cannot archive the sub-graph referenced from %s object ("
1834         PTR_FORMAT ") size %zu, skipped.",
1835         orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1836       if (level == 1) {
1837         // Don't archive a subgraph root that's too big. For archived static fields, that's OK
1838         // as the Java code will take care of initializing this field dynamically.
1839         return false;
1840       } else {
1841         // We don't know how to handle an object that has been archived, but some of its reachable
1842         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1843         // we have a real use case.
1844         exit_on_error();
1845       }
1846     }
1847   }
1848 
1849   Klass* orig_k = orig_obj->klass();
1850   subgraph_info->add_subgraph_object_klass(orig_k);
1851 
1852   {
1853     // Find all the oops that are referenced by orig_obj, push them onto the stack
1854     // so we can work on them next.
1855     ResourceMark rm;
1856     OopFieldPusher pusher(stack, level, record_klasses_only, subgraph_info, orig_obj);
1857     orig_obj->oop_iterate(&pusher);
1858   }
1859 
1860   if (CDSConfig::is_initing_classes_at_dump_time()) {
1861     // The enum klasses are archived with aot-initialized mirrors.
1862     // See AOTClassInitializer::can_archive_initialized_mirror().
1863   } else {
1864     if (CDSEnumKlass::is_enum_obj(orig_obj)) {

2278 
2279 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2280                                           bool is_full_module_graph) {
2281   _num_total_subgraph_recordings = 0;
2282   _num_total_walked_objs = 0;
2283   _num_total_archived_objs = 0;
2284   _num_total_recorded_klasses = 0;
2285   _num_total_verifications = 0;
2286 
2287   // For each class X that has one or more archived fields:
2288   // [1] Dump the subgraph of each archived field
2289   // [2] Create a list of all the classes of the objects that can be reached
2290   //     by any of these static fields.
2291   //     At runtime, these classes are initialized before X's archived fields
2292   //     are restored by HeapShared::initialize_from_archived_subgraph().
2293   for (int i = 0; fields[i].valid(); ) {
2294     ArchivableStaticFieldInfo* info = &fields[i];
2295     const char* klass_name = info->klass_name;
2296     start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2297 
2298     ContextMark cm(klass_name);
2299     // If you have specified consecutive fields of the same klass in
2300     // fields[], these will be archived in the same
2301     // {start_recording_subgraph ... done_recording_subgraph} pass to
2302     // save time.
2303     for (; fields[i].valid(); i++) {
2304       ArchivableStaticFieldInfo* f = &fields[i];
2305       if (f->klass_name != klass_name) {
2306         break;
2307       }
2308 
2309       ContextMark cm(f->field_name);
2310       archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2311                                                   f->offset, f->field_name);
2312     }
2313     done_recording_subgraph(info->klass, klass_name);
2314   }
2315 
2316   log_info(aot, heap)("Archived subgraph records = %d",
2317                       _num_total_subgraph_recordings);
2318   log_info(aot, heap)("  Walked %d objects", _num_total_walked_objs);
2319   log_info(aot, heap)("  Archived %d objects", _num_total_archived_objs);
2320   log_info(aot, heap)("  Recorded %d klasses", _num_total_recorded_klasses);
2321 
2322 #ifndef PRODUCT
2323   for (int i = 0; fields[i].valid(); i++) {
2324     ArchivableStaticFieldInfo* f = &fields[i];
2325     verify_subgraph_from_static_field(f->klass, f->offset);
2326   }
2327   log_info(aot, heap)("  Verified %d references", _num_total_verifications);
2328 #endif
2329 }
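
     // A minimal sketch of a fields[] table as consumed above; the klass and
     // field names are illustrative, and the {nullptr, nullptr} terminator is
     // what makes valid() return false and end the loops:
     //
     //   static ArchivableStaticFieldInfo example_fields[] = {
     //     {"java/lang/Integer$IntegerCache", "archivedCache"},
     //     {"java/lang/Integer$IntegerCache", "sizeTable"}, // same klass: one recording pass
     //     {nullptr, nullptr},
     //   };
     //   HeapShared::archive_object_subgraphs(example_fields, /*is_full_module_graph=*/false);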
2330 
2331 // Keep track of the contents of the archived interned string table. This table
2332 // is used only by CDSHeapVerifier.
2333 void HeapShared::add_to_dumped_interned_strings(oop string) {
2334   assert_at_safepoint(); // DumpedInternedStrings uses raw oops
2335   assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
2336   bool created;
2337   _dumped_interned_strings->put_if_absent(string, true, &created);
2338   if (created) {
2339     // Prevent string deduplication from changing the value field to
2340     // something not in the archive.
2341     java_lang_String::set_deduplication_forbidden(string);
2342     _dumped_interned_strings->maybe_grow();
2343   }
2344 }
2345 
2346 bool HeapShared::is_dumped_interned_string(oop o) {
2347   return _dumped_interned_strings->get(o) != nullptr;
2348 }
2349 
2350 // These tables should be used only within the CDS safepoint, so
2351 // delete them before we exit the safepoint. Otherwise the table will
2352 // contain bad oops after a GC.
2353 void HeapShared::delete_tables_with_raw_oops() {
2354   assert(_seen_objects_table == nullptr, "should have been deleted");
2355 
2356   delete _dumped_interned_strings;
2357   _dumped_interned_strings = nullptr;
2358 
2359   ArchiveHeapWriter::delete_tables_with_raw_oops();
2360 }
2361 
2362 void HeapShared::debug_trace() {
2363   ResourceMark rm;
2364   oop referrer = _object_being_archived.referrer();
2365   if (referrer != nullptr) {
2366     LogStream ls(Log(aot, heap)::error());
2367     ls.print_cr("Reference trace");
2368     CDSHeapVerifier::trace_to_root(&ls, referrer);
2369   }
2370 }
2371 
2372 #ifndef PRODUCT
2373 // At dump-time, find the location of all the non-null oop pointers in an archived heap
2374 // region. This way we can quickly relocate all the pointers without using
2375 // BasicOopIterateClosure at runtime.
2376 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
2377   void* _start;
2378   BitMap *_oopmap;
2379   int _num_total_oops;
2380   int _num_null_oops;
2381  public: