< prev index next >

src/hotspot/share/cds/heapShared.cpp

Print this page

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/aotArtifactFinder.hpp"

  26 #include "cds/aotClassInitializer.hpp"
  27 #include "cds/aotClassLocation.hpp"
  28 #include "cds/aotCompressedPointers.hpp"

  29 #include "cds/aotLogging.hpp"
  30 #include "cds/aotMappedHeapLoader.hpp"
  31 #include "cds/aotMappedHeapWriter.hpp"
  32 #include "cds/aotMetaspace.hpp"
  33 #include "cds/aotOopChecker.hpp"
  34 #include "cds/aotReferenceObjSupport.hpp"
  35 #include "cds/aotStreamedHeapLoader.hpp"
  36 #include "cds/aotStreamedHeapWriter.hpp"
  37 #include "cds/archiveBuilder.hpp"
  38 #include "cds/archiveUtils.hpp"
  39 #include "cds/cds_globals.hpp"
  40 #include "cds/cdsConfig.hpp"
  41 #include "cds/cdsEnumKlass.hpp"
  42 #include "cds/cdsHeapVerifier.hpp"
  43 #include "cds/heapShared.inline.hpp"
  44 #include "cds/regeneratedClasses.hpp"
  45 #include "classfile/classLoaderData.hpp"
  46 #include "classfile/javaClasses.inline.hpp"
  47 #include "classfile/modules.hpp"
  48 #include "classfile/stringTable.hpp"
  49 #include "classfile/symbolTable.hpp"
  50 #include "classfile/systemDictionary.hpp"
  51 #include "classfile/systemDictionaryShared.hpp"
  52 #include "classfile/vmClasses.hpp"
  53 #include "classfile/vmSymbols.hpp"

  54 #include "gc/shared/collectedHeap.hpp"
  55 #include "gc/shared/gcLocker.hpp"
  56 #include "gc/shared/gcVMOperations.hpp"
  57 #include "logging/log.hpp"
  58 #include "logging/logStream.hpp"
  59 #include "memory/iterator.inline.hpp"
  60 #include "memory/resourceArea.hpp"
  61 #include "memory/universe.hpp"
  62 #include "oops/compressedOops.inline.hpp"
  63 #include "oops/fieldStreams.inline.hpp"
  64 #include "oops/objArrayOop.inline.hpp"
  65 #include "oops/oop.inline.hpp"
  66 #include "oops/oopHandle.inline.hpp"
  67 #include "oops/typeArrayOop.inline.hpp"
  68 #include "prims/jvmtiExport.hpp"
  69 #include "runtime/arguments.hpp"
  70 #include "runtime/fieldDescriptor.inline.hpp"
  71 #include "runtime/globals_extension.hpp"
  72 #include "runtime/init.hpp"
  73 #include "runtime/javaCalls.hpp"

 113 #endif
 114 
 115 
 116 //
 117 // If you add new entries to the following tables, you should know what you're doing!
 118 //
 119 
 120 static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
 121   {"java/lang/Integer$IntegerCache",              "archivedCache"},
 122   {"java/lang/Long$LongCache",                    "archivedCache"},
 123   {"java/lang/Byte$ByteCache",                    "archivedCache"},
 124   {"java/lang/Short$ShortCache",                  "archivedCache"},
 125   {"java/lang/Character$CharacterCache",          "archivedCache"},
 126   {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
 127   {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
 128   {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
 129   {"java/util/ImmutableCollections",              "archivedObjects"},
 130   {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
 131   {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
 132   {"jdk/internal/math/FDBigInteger",              "archivedCaches"},

 133 
 134 #ifndef PRODUCT
 135   {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
 136 #endif
 137   {nullptr, nullptr},
 138 };
 139 
 140 // full module graph
 141 static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
 142   {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
 143   {ARCHIVED_BOOT_LAYER_CLASS,                     ARCHIVED_BOOT_LAYER_FIELD},
 144   {"java/lang/Module$ArchivedData",               "archivedData"},
 145   {nullptr, nullptr},
 146 };
 147 
 148 KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
 149 ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
 150 GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
 151 OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
 152 MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;

 166     // Legacy CDS archive support (to be deprecated)
 167     return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
 168            is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
 169   } else {
 170     return false;
 171   }
 172 }
 173 
// Resolve and return the dump-time referrer of this cached object (the oop
// that was being scanned when this object was discovered); may be null.
  174 oop HeapShared::CachedOopInfo::orig_referrer() const {
  175   return _orig_referrer.resolve();
  176 }
 177 
// Address-based hash of an oop. Valid only while oops cannot move (at a
// safepoint or inside a no-safepoint scope), hence the assert below.
  178 unsigned HeapShared::oop_hash(oop const& p) {
  179   assert(SafepointSynchronize::is_at_safepoint() ||
  180          JavaThread::current()->is_in_no_safepoint_scope(), "sanity");
  181   // Do not call p->identity_hash() as that will update the
  182   // object header.
  183   return primitive_hash(cast_from_oop<intptr_t>(p));
  184 }
 185 
// Address-based hash of the handle's target; see oop_hash() for the
// GC-movement caveats (does not touch the object header).
  186 unsigned int HeapShared::oop_handle_hash_raw(const OopHandle& oh) {
  187   return oop_hash(oh.resolve());
  188 }
 189 
// GC-stable hash for an OopHandle: uses the target's identity hash (which
// may install a hash in the object header). Null handles hash to 0.
  190 unsigned int HeapShared::oop_handle_hash(const OopHandle& oh) {



  191   oop o = oh.resolve();
  192   if (o == nullptr) {
  193     return 0;










  194   } else {
  195     return o->identity_hash();


  196   }
  197 }
 198 
// Two handles are considered equal iff they resolve to the same oop.
  199 bool HeapShared::oop_handle_equals(const OopHandle& a, const OopHandle& b) {
  200   return a.resolve() == b.resolve();
  201 }
 202 
 203 static void reset_states(oop obj, TRAPS) {
 204   Handle h_obj(THREAD, obj);
 205   InstanceKlass* klass = InstanceKlass::cast(obj->klass());
 206   TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
 207   Symbol* method_sig = vmSymbols::void_method_signature();
 208 
 209   while (klass != nullptr) {
 210     Method* method = klass->find_method(method_name, method_sig);
 211     if (method != nullptr) {
 212       assert(method->is_private(), "must be");
 213       if (log_is_enabled(Debug, aot)) {
 214         ResourceMark rm(THREAD);
 215         log_debug(aot)("  calling %s", method->name_and_sig_as_C_string());

 253 
 254   Klass* weak_ref_key_class = SystemDictionary::resolve_or_fail(class_name, true, CHECK);
 255   precond(weak_ref_key_class != nullptr);
 256 
 257   log_debug(aot)("Calling WeakReferenceKey::ensureDeterministicAOTCache(Object.class)");
 258   JavaValue result(T_BOOLEAN);
 259   JavaCalls::call_static(&result,
 260                          weak_ref_key_class,
 261                          method_name,
 262                          vmSymbols::void_boolean_signature(),
 263                          CHECK);
 264   assert(result.get_jboolean() == false, "sanity");
 265 }
 266 
// Dump-time preparation: run the Java-side resetArchivedStates hooks and
// make heap contents deterministic before the heap is scanned for archiving.
  267 void HeapShared::prepare_for_archiving(TRAPS) {
  268   reset_archived_object_states(CHECK);
  269   ensure_determinism(CHECK);
  270 }
 271 
 272 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;

 273 
// Returns true if an archived heap is actively in use at runtime, delegating
// to whichever loader (streamed or mapped) matches the current loading mode.
  274 bool HeapShared::is_archived_heap_in_use() {
  275   if (HeapShared::is_loading()) {
  276     if (HeapShared::is_loading_streaming_mode()) {
  277       return AOTStreamedHeapLoader::is_in_use();
  278     } else {
  279       return AOTMappedHeapLoader::is_in_use();
  280     }
  281   }
  282 
  283   return false;
  284 }
 285 
 286 bool HeapShared::can_use_archived_heap() {
 287   FileMapInfo* static_mapinfo = FileMapInfo::current_info();
 288   if (static_mapinfo == nullptr) {
 289     return false;
 290   }
 291   if (!static_mapinfo->has_heap_region()) {
 292     return false;

 367 
// Kick off streamed-heap loading; only legal in streaming loading mode, and
// a no-op when the archive has no usable heap region.
  368 void HeapShared::initialize_streaming() {
  369   assert(is_loading_streaming_mode(), "shouldn't call this");
  370   if (can_use_archived_heap()) {
  371     AOTStreamedHeapLoader::initialize();
  372   }
  373 }
 374 
// Forward to the streamed-heap loader when active; no-op otherwise.
  375 void HeapShared::enable_gc() {
  376   if (AOTStreamedHeapLoader::is_in_use()) {
  377     AOTStreamedHeapLoader::enable_gc();
  378   }
  379 }
 380 
// Forward to the streamed-heap loader when active; no-op otherwise.
  381 void HeapShared::materialize_thread_object() {
  382   if (AOTStreamedHeapLoader::is_in_use()) {
  383     AOTStreamedHeapLoader::materialize_thread_object();
  384   }
  385 }
 386 
// Record an interned string for the mapped-heap dump and archive everything
// reachable from it into the special subgraph (mapping writing mode only).
  387 void HeapShared::add_to_dumped_interned_strings(oop string) {
  388   assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
  389   AOTMappedHeapWriter::add_to_dumped_interned_strings(string);
  390   bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
  391   assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
  392 }
 393 
// Complete archived-heap initialization for the active loader. Must run
// after ReadClosure has initialized vtables in the mapped metadata.
  394 void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
  395   if (HeapShared::is_loading()) {
  396     if (HeapShared::is_loading_streaming_mode()) {
  397       // Heap initialization can be done only after vtables are initialized by ReadClosure.
  398       AOTStreamedHeapLoader::finish_initialization(static_mapinfo);
  399     } else {
  400       // Finish up archived heap initialization. These must be
  401       // done after ReadClosure.
  402       AOTMappedHeapLoader::finish_initialization(static_mapinfo);
  403     }
  404   }
  405 }
 406 
















// Look up the dump-time cache entry for obj. A temporary OopHandle is
// created just for the keyed lookup and released before returning.
  407 HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
  408   OopHandle oh(Universe::vm_global(), obj);
  409   CachedOopInfo* result = _archived_object_cache->get(oh);
  410   oh.release(Universe::vm_global());
  411   return result;
  412 }
 413 
// Dump-time query: true iff obj already has an entry in the archived-object cache.
  414 bool HeapShared::has_been_archived(oop obj) {
  415   assert(CDSConfig::is_dumping_heap(), "dump-time only");
  416   return get_cached_oop_info(obj) != nullptr;
  417 }
 418 
// Append obj (which must already be archived, or null) to the pending-roots
// array and return its root index. VM-thread only: _pending_roots holds raw
// oops that are not scanned by GC, so no safepoint may intervene.
  419 int HeapShared::append_root(oop obj) {

  420   assert(CDSConfig::is_dumping_heap(), "dump-time only");
  421   if (obj != nullptr) {
  422     assert(has_been_archived(obj), "must be");



































  423   }
  424   // No GC should happen since we aren't scanning _pending_roots.
  425   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  426 
  427   return _pending_roots->append(obj);




  428 }
 429 
// Fetch the archived heap root at index from the active loader (streamed or
// mapped). When clear is true the slot is released after reading so the
// object can eventually be collected.
  430 oop HeapShared::get_root(int index, bool clear) {
  431   assert(index >= 0, "sanity");
  432   assert(is_archived_heap_in_use(), "getting roots into heap that is not used");
  433 
  434   oop result;
  435   if (HeapShared::is_loading_streaming_mode()) {
  436     result = AOTStreamedHeapLoader::get_root(index);
  437   } else {
  438     assert(HeapShared::is_loading_mapping_mode(), "must be");
  439     result = AOTMappedHeapLoader::get_root(index);
  440   }
  441 
  442   if (clear) {
  443     clear_root(index);
  444   }
  445 
  446   return result;
  447 }
 448 
// Forward to the streamed-heap loader when active; no-op otherwise.
  449 void HeapShared::finish_materialize_objects() {
  450   if (AOTStreamedHeapLoader::is_in_use()) {
  451     AOTStreamedHeapLoader::finish_materialize_objects();
  452   }
  453 }
 454 
 455 void HeapShared::clear_root(int index) {







 456   assert(index >= 0, "sanity");
 457   assert(CDSConfig::is_using_archive(), "must be");
 458   if (is_archived_heap_in_use()) {
 459     if (log_is_enabled(Debug, aot, heap)) {
 460       log_debug(aot, heap)("Clearing root %d: was %zu", index, p2i(get_root(index, false /* clear */)));
 461     }
 462     if (HeapShared::is_loading_streaming_mode()) {
 463       AOTStreamedHeapLoader::clear_root(index);
 464     } else {
 465       assert(HeapShared::is_loading_mapping_mode(), "must be");
 466       AOTMappedHeapLoader::clear_root(index);
 467     }
 468   }
 469 }
 470 
 471 bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info) {
 472   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 473 
 474   assert(!obj->is_stackChunk(), "do not archive stack chunks");
 475   if (has_been_archived(obj)) {

 516         // returned from jdk.internal.misc.CDS::initializeFromArchive().
 517         // See HeapShared::initialize_from_archived_subgraph().
 518     {
 519       AOTArtifactFinder::add_aot_inited_class(InstanceKlass::cast(k));
 520     }
 521 
 522     if (java_lang_Class::is_instance(obj)) {
 523       Klass* mirror_k = java_lang_Class::as_Klass(obj);
 524       if (mirror_k != nullptr) {
 525         AOTArtifactFinder::add_cached_class(mirror_k);
 526       }
 527     } else if (java_lang_invoke_ResolvedMethodName::is_instance(obj)) {
 528       Method* m = java_lang_invoke_ResolvedMethodName::vmtarget(obj);
 529       if (m != nullptr) {
 530         if (RegeneratedClasses::has_been_regenerated(m)) {
 531           m = RegeneratedClasses::get_regenerated_object(m);
 532         }
 533         InstanceKlass* method_holder = m->method_holder();
 534         AOTArtifactFinder::add_cached_class(method_holder);
 535       }




 536     }
 537   }
 538 
 539   if (log_is_enabled(Debug, aot, heap)) {
 540     ResourceMark rm;
 541     LogTarget(Debug, aot, heap) log;
 542     LogStream out(log);
 543     out.print("Archived heap object " PTR_FORMAT " : %s ",
 544               p2i(obj), obj->klass()->external_name());
 545     if (java_lang_Class::is_instance(obj)) {
 546       Klass* k = java_lang_Class::as_Klass(obj);
 547       if (k != nullptr) {
 548         out.print("%s", k->external_name());
 549       } else {
 550         out.print("primitive");
 551       }
 552     }
 553     out.cr();
 554   }
 555 

 583       remove(ptr);
 584     }
 585   }
 586 };
 587 
// Associate a scratch resolved_references array with a constant pool, but
// only for builtin class loaders; redefined classes in an AOT training run
// keep their original entry and are excluded from the AOT config.
  588 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  589   if (CDSConfig::is_dumping_preimage_static_archive() && scratch_resolved_references(src) != nullptr) {
  590     // We are in AOT training run. The class has been redefined and we are giving it a new resolved_reference.
  591     // Ignore it, as this class will be excluded from the AOT config.
  592     return;
  593   }
  594   if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
  595     _scratch_objects_table->set_oop(src, dest);
  596   }
  597 }
 598 
// Return the scratch resolved_references array previously registered for
// src via add_scratch_resolved_references(), or null if none.
  599 objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  600   return (objArrayOop)_scratch_objects_table->get_oop(src);
  601 }
 602 
// Allocate the dump-time side tables: the scratch-object map and the
// pending-roots array (initial capacity 500).
  603  void HeapShared::init_dumping() {
  604    _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  605    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);

  606 }
 607 
// Create a scratch mirror for every primitive BasicType (T_BOOLEAN..T_VOID)
// and pin each one with a global OopHandle for use during archiving.
  608 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
  609   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
  610     BasicType bt = (BasicType)i;
  611     if (!is_reference_type(bt)) {
  612       oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
  613       _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
  614     }
  615   }
  616 }
 617 
 618 // Given java_mirror that represents a (primitive or reference) type T,
 619 // return the "scratch" version that represents the same type T. Note
 620 // that java_mirror will be returned if the mirror is already a scratch mirror.
 621 //
 622 // See java_lang_Class::create_scratch_mirror() for more info.
 623 oop HeapShared::scratch_java_mirror(oop java_mirror) {
 624   assert(java_lang_Class::is_instance(java_mirror), "must be");
 625 

 702   if (RegeneratedClasses::is_regenerated_object(ik)) {
 703     InstanceKlass* orig_ik = RegeneratedClasses::get_original_object(ik);
 704     precond(orig_ik->is_initialized());
 705     orig_mirror = orig_ik->java_mirror();
 706   } else {
 707     precond(ik->is_initialized());
 708     orig_mirror = ik->java_mirror();
 709   }
 710 
 711   oop m = scratch_java_mirror(ik);
 712   int nfields = 0;
 713   for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
 714     if (fs.access_flags().is_static()) {
 715       fieldDescriptor& fd = fs.field_descriptor();
 716       int offset = fd.offset();
 717       switch (fd.field_type()) {
 718       case T_OBJECT:
 719       case T_ARRAY:
 720         {
 721           oop field_obj = orig_mirror->obj_field(offset);
 722           if (offset == java_lang_Class::reflection_data_offset()) {
 723             // Class::reflectData use SoftReference, which cannot be archived. Set it
 724             // to null and it will be recreated at runtime.
 725             field_obj = nullptr;
 726           }
 727           m->obj_field_put(offset, field_obj);
 728           if (field_obj != nullptr) {
 729             bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, field_obj);
 730             assert(success, "sanity");
 731           }
 732         }
 733         break;
 734       case T_BOOLEAN:
 735         m->bool_field_put(offset, orig_mirror->bool_field(offset));
 736         break;
 737       case T_BYTE:
 738         m->byte_field_put(offset, orig_mirror->byte_field(offset));
 739         break;
 740       case T_SHORT:
 741         m->short_field_put(offset, orig_mirror->short_field(offset));
 742         break;
 743       case T_CHAR:
 744         m->char_field_put(offset, orig_mirror->char_field(offset));
 745         break;
 746       case T_INT:

 781   // We need to retain the identity_hash, because it may have been used by some hashtables
 782   // in the shared heap.
 783   if (!orig_mirror->fast_no_hash_check()) {
 784     intptr_t src_hash = orig_mirror->identity_hash();
 785     if (UseCompactObjectHeaders) {
 786       narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
 787       scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
 788     } else {
 789       scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
 790     }
 791     assert(scratch_m->mark().is_unlocked(), "sanity");
 792 
 793     DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
 794     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
 795   }
 796 
 797   if (CDSConfig::is_dumping_aot_linked_classes()) {
 798     java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
 799     java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
 800   }








 801 }
 802 
// Return the scratch resolved_references to archive for src_ik, or null when
// the class isn't from a builtin loader, has no resolved references, or the
// array is too large to archive.
  803 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
  804   if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
  805     objArrayOop rr = src_ik->constants()->resolved_references_or_null();
  806     if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
  807       return HeapShared::scratch_resolved_references(src_ik->constants());
  808     }
  809   }
  810   return nullptr;
  811 }
 812 
// Archive a preallocated exception instance (and everything it reaches) into
// the special subgraph; returns the root index assigned to it.
  813 int HeapShared::archive_exception_instance(oop exception) {
  814   bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
  815   assert(success, "sanity");
  816   return append_root(exception);
  817 }
 818 
 819 void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
 820   OopHandle oh(&src_obj);

 852                                         p2i((address)G1CollectedHeap::heap()->reserved().end()));
 853     }
 854 
 855     archive_subgraphs();
 856   }
 857 
 858   init_seen_objects_table();
 859   Universe::archive_exception_instances();
 860 }
 861 
// Tear down after the oop-scanning phase: build the shared string table
// (mapping mode only) and discard the seen-objects table.
  862 void HeapShared::end_scanning_for_oops() {
  863   if (is_writing_mapping_mode()) {
  864     StringTable::init_shared_table();
  865   }
  866   delete_seen_objects_table();
  867 }
 868 
// Verify the scanned heap, then emit it through the writer matching the
// current writing mode, and finally record the subgraph info table in the
// read-only region.
  869 void HeapShared::write_heap(AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeapInfo* streamed_heap_info) {
  870   {
// Verification must see a stable heap: no safepoint may move oops here.
  871     NoSafepointVerifier nsv;
  872     CDSHeapVerifier::verify();


  873     check_special_subgraph_classes();
  874   }
  875 
  876   if (HeapShared::is_writing_mapping_mode()) {
  877     StringTable::write_shared_table();
  878     AOTMappedHeapWriter::write(_pending_roots, mapped_heap_info);
  879   } else {
  880     assert(HeapShared::is_writing_streaming_mode(), "are there more modes?");
  881     AOTStreamedHeapWriter::write(_pending_roots, streamed_heap_info);
  882   }
  883 
  884   ArchiveBuilder::OtherROAllocMark mark;
  885   write_subgraph_info_table();




  886 }
 887 
// Copy the original mirror's contents into its scratch mirror and archive
// everything reachable from the scratch copy. Mirrors of classes from custom
// loaders have no scratch mirror and are skipped.
  888 void HeapShared::scan_java_mirror(oop orig_mirror) {
  889   oop m = scratch_java_mirror(orig_mirror);
  890   if (m != nullptr) { // nullptr if for custom class loader
  891     copy_java_mirror(orig_mirror, m);
  892     bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
  893     assert(success, "sanity");






  894   }
  895 }
 896 
// Archive the heap objects belonging to a class: its mirror, and (for
// instance klasses) its prepared resolved_references array if archivable.
  897 void HeapShared::scan_java_class(Klass* orig_k) {
  898   scan_java_mirror(orig_k->java_mirror());
  899 
  900   if (orig_k->is_instance_klass()) {
  901     InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
  902     orig_ik->constants()->prepare_resolved_references_for_archiving();
  903     objArrayOop rr = get_archived_resolved_references(orig_ik);
  904     if (rr != nullptr) {
  905       bool success = HeapShared::archive_reachable_objects_from(1, _dump_time_special_subgraph, rr);
  906       assert(success, "must be");
  907     }
  908   }
  909 }
 910 
 911 void HeapShared::archive_subgraphs() {
 912   assert(CDSConfig::is_dumping_heap(), "must be");
 913 

 938                                                   &created);
 939   assert(created, "must not initialize twice");
 940   return info;
 941 }
 942 
// Return the dump-time subgraph info for k; the entry must already exist
// (created by init_subgraph_info).
  943 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  944   assert(CDSConfig::is_dumping_heap(), "dump time only");
  945   KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
  946   assert(info != nullptr, "must have been initialized");
  947   return info;
  948 }
 949 
  950 // Add an entry field to the current KlassSubGraphInfo.
// Entries are stored as (field_offset, root_index) pairs: the static field's
// offset in the mirror, followed by the root index of its archived value.
  951 void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
  952   assert(CDSConfig::is_dumping_heap(), "dump time only");
  953   if (_subgraph_entry_fields == nullptr) {
  954     _subgraph_entry_fields =
  955       new (mtClass) GrowableArray<int>(10, mtClass);
  956   }
  957   _subgraph_entry_fields->append(static_field_offset);
  958   _subgraph_entry_fields->append(HeapShared::append_root(v));




  959 }
 960 
 961 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
 962 // Only objects of boot classes can be included in sub-graph.
 963 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
 964   assert(CDSConfig::is_dumping_heap(), "dump time only");
 965 
 966   if (_subgraph_object_klasses == nullptr) {
 967     _subgraph_object_klasses =
 968       new (mtClass) GrowableArray<Klass*>(50, mtClass);
 969   }
 970 
 971   if (_k == orig_k) {
 972     // Don't add the Klass containing the sub-graph to it's own klass
 973     // initialization list.
 974     return;
 975   }
 976 
 977   if (orig_k->is_instance_klass()) {
 978 #ifdef ASSERT

1280     TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1281     InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1282     assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
1283     resolve_classes_for_subgraph_of(current, k);
1284   }
1285 }
1286 
// Resolve (without initializing) the classes recorded for k's archived
// subgraph. Any exception is swallowed, and if the record is unusable the
// archived roots for k are cleared so the subgraph is recreated normally.
 1287 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
 1288   JavaThread* THREAD = current;
 1289   ExceptionMark em(THREAD);
 1290   const ArchivedKlassSubGraphInfoRecord* record =
 1291    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
 1292   if (HAS_PENDING_EXCEPTION) {
 1293    CLEAR_PENDING_EXCEPTION;
 1294   }
 1295   if (record == nullptr) {
 1296    clear_archived_roots_of(k);
 1297   }
 1298 }
1299 










// Eagerly resolve and initialize the java.lang.invoke holder classes needed
// when AOT-linked classes or archived method handles are in play.
 1300 void HeapShared::initialize_java_lang_invoke(TRAPS) {
 1301   if (CDSConfig::is_using_aot_linked_classes() || CDSConfig::is_dumping_method_handles()) {
 1302     resolve_or_init("java/lang/invoke/Invokers$Holder", true, CHECK);
 1303     resolve_or_init("java/lang/invoke/MethodHandle", true, CHECK);
 1304     resolve_or_init("java/lang/invoke/MethodHandleNatives", true, CHECK);
 1305     resolve_or_init("java/lang/invoke/DirectMethodHandle$Holder", true, CHECK);
 1306     resolve_or_init("java/lang/invoke/DelegatingMethodHandle$Holder", true, CHECK);
 1307     resolve_or_init("java/lang/invoke/LambdaForm$Holder", true, CHECK);
 1308     resolve_or_init("java/lang/invoke/BoundMethodHandle$Species_L", true, CHECK);
 1309   }
 1310 }
1311 
1312 // Initialize the InstanceKlasses of objects that are reachable from the following roots:
1313 //   - interned strings
1314 //   - Klass::java_mirror() -- including aot-initialized mirrors such as those of Enum klasses.
1315 //   - ConstantPool::resolved_references()
1316 //   - Universe::<xxx>_exception_instance()
1317 //
1318 // For example, if this enum class is initialized at AOT cache assembly time:
1319 //
1320 //    enum Fruit {
1321 //       APPLE, ORANGE, BANANA;
1322 //       static final Set<Fruit> HAVE_SEEDS = new HashSet<>(Arrays.asList(APPLE, ORANGE));
1323 //   }
1324 //
1325 // the aot-initialized mirror of Fruit has a static field that references HashSet, which
1326 // should be initialized before any Java code can access the Fruit class. Note that
1327 // HashSet itself doesn't necessary need to be an aot-initialized class.
1328 void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {

1486       ik->initialize(CHECK);
1487     } else if (k->is_objArray_klass()) {
1488       ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1489       oak->initialize(CHECK);
1490     }
1491   }
1492 }
1493 
1494 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1495   verify_the_heap(k, "before");
1496 
1497   Array<int>* entry_field_records = record->entry_field_records();
1498   if (entry_field_records != nullptr) {
1499     int efr_len = entry_field_records->length();
1500     assert(efr_len % 2 == 0, "sanity");
1501     for (int i = 0; i < efr_len; i += 2) {
1502       int field_offset = entry_field_records->at(i);
1503       int root_index = entry_field_records->at(i+1);
1504       // Load the subgraph entry fields from the record and store them back to
1505       // the corresponding fields within the mirror.
1506       oop v = get_root(root_index, /*clear=*/true);





1507       oop m = k->java_mirror();
1508       if (k->has_aot_initialized_mirror()) {
1509         assert(v == m->obj_field(field_offset), "must be aot-initialized");
1510       } else {
1511         m->obj_field_put(field_offset, v);
1512       }
1513       log_debug(aot, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
1514     }
1515 
1516     // Done. Java code can see the archived sub-graphs referenced from k's
1517     // mirror after this point.
1518     if (log_is_enabled(Info, aot, heap)) {
1519       ResourceMark rm;
1520       log_info(aot, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s%s",
1521                           k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "",
1522                           k->has_aot_initialized_mirror() ? " (aot-inited)" : "");
1523     }
1524   }
1525 
1526   verify_the_heap(k, "after ");

1894     verify_subgraph_from(f);
1895   }
1896 }
1897 
// Debug-only check that every object reachable from orig_obj was archived.
// An unarchived root is tolerated (see archive_reachable_objects_from()).
 1898 void HeapShared::verify_subgraph_from(oop orig_obj) {
 1899   if (!has_been_archived(orig_obj)) {
 1900     // It's OK for the root of a subgraph to be not archived. See comments in
 1901     // archive_reachable_objects_from().
 1902     return;
 1903   }
 1904 
 1905   // Verify that all objects reachable from orig_obj are archived.
 1906   init_seen_objects_table();
 1907   verify_reachable_objects_from(orig_obj);
 1908   delete_seen_objects_table();
 1909 }
1910 
// Recursive worker for verify_subgraph_from(): walks the object graph via
// VerifySharedOopClosure and asserts each unseen object has been archived.
// Class mirrors are swapped for their scratch copies before checking.
 1911 void HeapShared::verify_reachable_objects_from(oop obj) {
 1912   _num_total_verifications ++;
 1913   if (java_lang_Class::is_instance(obj)) {




 1914     obj = scratch_java_mirror(obj);
 1915     assert(obj != nullptr, "must be");
 1916   }
 1917   if (!has_been_seen_during_subgraph_recording(obj)) {
 1918     set_has_been_seen_during_subgraph_recording(obj);
 1919     assert(has_been_archived(obj), "must be");
 1920     VerifySharedOopClosure walker;
 1921     obj->oop_iterate(&walker);
 1922   }
 1923 }
1924 #endif
1925 
1926 void HeapShared::check_special_subgraph_classes() {
1927   if (CDSConfig::is_dumping_aot_linked_classes()) {
1928     // We can have aot-initialized classes (such as Enums) that can reference objects
1929     // of arbitrary types. Currently, we trust the JEP 483 implementation to only
1930     // aot-initialize classes that are "safe".
1931     //
1932     // TODO: we need an automatic tool that checks the safety of aot-initialized
1933     // classes (when we extend the set of aot-initialized classes beyond JEP 483)

2247                                                   f->offset, f->field_name);
2248     }
2249     done_recording_subgraph(info->klass, klass_name);
2250   }
2251 
2252   log_info(aot, heap)("Archived subgraph records = %zu",
2253                       _num_total_subgraph_recordings);
2254   log_info(aot, heap)("  Walked %zu objects", _num_total_walked_objs);
2255   log_info(aot, heap)("  Archived %zu objects", _num_total_archived_objs);
2256   log_info(aot, heap)("  Recorded %zu klasses", _num_total_recorded_klasses);
2257 
2258 #ifndef PRODUCT
2259   for (int i = 0; fields[i].valid(); i++) {
2260     ArchivableStaticFieldInfo* f = &fields[i];
2261     verify_subgraph_from_static_field(f->klass, f->offset);
2262   }
2263   log_info(aot, heap)("  Verified %zu references", _num_total_verifications);
2264 #endif
2265 }
2266 
// Query whether o was recorded as a dumped interned string, delegating to
// the writer that matches the current writing mode.
 2267 bool HeapShared::is_dumped_interned_string(oop o) {
 2268   if (is_writing_mapping_mode()) {
 2269     return AOTMappedHeapWriter::is_dumped_interned_string(o);
 2270   } else {
 2271     return AOTStreamedHeapWriter::is_dumped_interned_string(o);




 2272   }




 2273 }
2274 
2275 // These tables should be used only within the CDS safepoint, so
2276 // delete them before we exit the safepoint. Otherwise the table will
2277 // contain bad oops after a GC.
// Delete the writer-side tables that hold raw oops. Must happen inside the
// CDS safepoint (see comment above); after a GC these tables would contain
// stale pointers. The seen-objects table must already be gone.
 2278 void HeapShared::delete_tables_with_raw_oops() {
 2279   assert(_seen_objects_table == nullptr, "should have been deleted");
 2280 
 2281   if (is_writing_mapping_mode()) {
 2282     AOTMappedHeapWriter::delete_tables_with_raw_oops();
 2283   } else {
 2284     assert(is_writing_streaming_mode(), "what other mode?");
 2285     AOTStreamedHeapWriter::delete_tables_with_raw_oops();
 2286   }
 2287 }
2288 
2289 void HeapShared::debug_trace() {
2290   ResourceMark rm;
2291   oop referrer = _object_being_archived.referrer();
2292   if (referrer != nullptr) {

   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/aotArtifactFinder.hpp"
  26 #include "cds/aotCacheAccess.hpp"
  27 #include "cds/aotClassInitializer.hpp"
  28 #include "cds/aotClassLocation.hpp"
  29 #include "cds/aotCompressedPointers.hpp"
  30 #include "cds/aotConstantPoolResolver.hpp"
  31 #include "cds/aotLogging.hpp"
  32 #include "cds/aotMappedHeapLoader.hpp"
  33 #include "cds/aotMappedHeapWriter.hpp"
  34 #include "cds/aotMetaspace.hpp"
  35 #include "cds/aotOopChecker.hpp"
  36 #include "cds/aotReferenceObjSupport.hpp"
  37 #include "cds/aotStreamedHeapLoader.hpp"
  38 #include "cds/aotStreamedHeapWriter.hpp"
  39 #include "cds/archiveBuilder.hpp"
  40 #include "cds/archiveUtils.hpp"
  41 #include "cds/cds_globals.hpp"
  42 #include "cds/cdsConfig.hpp"
  43 #include "cds/cdsEnumKlass.hpp"
  44 #include "cds/cdsHeapVerifier.hpp"
  45 #include "cds/heapShared.inline.hpp"
  46 #include "cds/regeneratedClasses.hpp"
  47 #include "classfile/classLoaderData.hpp"
  48 #include "classfile/javaClasses.inline.hpp"
  49 #include "classfile/modules.hpp"
  50 #include "classfile/stringTable.hpp"
  51 #include "classfile/symbolTable.hpp"
  52 #include "classfile/systemDictionary.hpp"
  53 #include "classfile/systemDictionaryShared.hpp"
  54 #include "classfile/vmClasses.hpp"
  55 #include "classfile/vmSymbols.hpp"
  56 #include "code/aotCodeCache.hpp"
  57 #include "gc/shared/collectedHeap.hpp"
  58 #include "gc/shared/gcLocker.hpp"
  59 #include "gc/shared/gcVMOperations.hpp"
  60 #include "logging/log.hpp"
  61 #include "logging/logStream.hpp"
  62 #include "memory/iterator.inline.hpp"
  63 #include "memory/resourceArea.hpp"
  64 #include "memory/universe.hpp"
  65 #include "oops/compressedOops.inline.hpp"
  66 #include "oops/fieldStreams.inline.hpp"
  67 #include "oops/objArrayOop.inline.hpp"
  68 #include "oops/oop.inline.hpp"
  69 #include "oops/oopHandle.inline.hpp"
  70 #include "oops/typeArrayOop.inline.hpp"
  71 #include "prims/jvmtiExport.hpp"
  72 #include "runtime/arguments.hpp"
  73 #include "runtime/fieldDescriptor.inline.hpp"
  74 #include "runtime/globals_extension.hpp"
  75 #include "runtime/init.hpp"
  76 #include "runtime/javaCalls.hpp"

 116 #endif
 117 
 118 
 119 //
 120 // If you add new entries to the following tables, you should know what you're doing!
 121 //
 122 
// Static fields that act as entry points ("roots") of archived object
// subgraphs. Each {class_name, field_name} pair names a static field whose
// value, and every object reachable from it, is stored in the archived heap.
// The list is terminated by a {nullptr, nullptr} sentinel.
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",              "archivedCache"},
  {"java/lang/Long$LongCache",                    "archivedCache"},
  {"java/lang/Byte$ByteCache",                    "archivedCache"},
  {"java/lang/Short$ShortCache",                  "archivedCache"},
  {"java/lang/Character$CharacterCache",          "archivedCache"},
  {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
  {"java/util/ImmutableCollections",              "archivedObjects"},
  {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
  {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
  {"java/lang/reflect/Proxy$ProxyBuilder",        "archivedData"},    // FIXME -- requires AOTClassLinking

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};
 143 
// Additional subgraph entry fields that are archived only when the full
// module graph is being dumped. Same format and sentinel convention as
// archive_subgraph_entry_fields above.
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS,                     ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData",               "archivedData"},
  {nullptr, nullptr},
};
 151 
// Dump-time record of the "special" subgraph (mirrors, interned strings, etc.).
KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
// Run-time (archived) counterpart of the special subgraph record.
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
// Roots collected during dumping; slot 0 is reserved for null (see init_dumping()).
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
// Scratch mirrors for the primitive types, indexed by BasicType.
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;

 170     // Legacy CDS archive support (to be deprecated)
 171     return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
 172            is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
 173   } else {
 174     return false;
 175   }
 176 }
 177 
 178 oop HeapShared::CachedOopInfo::orig_referrer() const {
 179   return _orig_referrer.resolve();
 180 }
 181 
// Hash an oop by its raw address. This is only valid while the object
// cannot move, hence the safepoint / no-safepoint-scope assert.
unsigned HeapShared::oop_hash(oop const& p) {
  assert(SafepointSynchronize::is_at_safepoint() ||
         JavaThread::current()->is_in_no_safepoint_scope(), "sanity");
  // Do not call p->identity_hash() as that will update the
  // object header.
  return primitive_hash(cast_from_oop<intptr_t>(p));
}
 189 
 190 // About the hashcode in the cached objects:
 191 // - If a source object has a hashcode, it must be copied into the cache.
 192 //   That's because some cached hashtables are laid out using this hashcode.
 193 // - If a source object doesn't have a hashcode, we avoid computing it while
 194 //   copying the objects into the cache. This will allow the hashcode to be
 195 //   dynamically and randomly computed in each production, which generally
 196 //   desirable to make the hashcodes more random between runs.
 197 unsigned HeapShared::archived_object_cache_hash(OopHandle const& oh) {
 198   oop o = oh.resolve();
 199   if (_use_identity_hash_for_archived_object_cache) {
 200     // This is called after all objects are copied. It's OK to update
 201     // the object's hashcode.
 202     //
 203     // This may be called after we have left the AOT dumping safepoint.
 204     // Objects in archived_object_cache() may be moved by the GC, so we
 205     // can't use the address of o for computing the hash.
 206     if (o == nullptr) {
 207       return 0;
 208     } else {
 209       return o->identity_hash();
 210     }
 211   } else {
 212     // This is called while we are copying the objects. Don't call o->identity_hash()
 213     // as that will update the object header.
 214     return oop_hash(o);
 215   }
 216 }
 217 
 218 bool HeapShared::oop_handle_equals(const OopHandle& a, const OopHandle& b) {
 219   return a.resolve() == b.resolve();
 220 }
 221 
 222 static void reset_states(oop obj, TRAPS) {
 223   Handle h_obj(THREAD, obj);
 224   InstanceKlass* klass = InstanceKlass::cast(obj->klass());
 225   TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
 226   Symbol* method_sig = vmSymbols::void_method_signature();
 227 
 228   while (klass != nullptr) {
 229     Method* method = klass->find_method(method_name, method_sig);
 230     if (method != nullptr) {
 231       assert(method->is_private(), "must be");
 232       if (log_is_enabled(Debug, aot)) {
 233         ResourceMark rm(THREAD);
 234         log_debug(aot)("  calling %s", method->name_and_sig_as_C_string());

 272 
 273   Klass* weak_ref_key_class = SystemDictionary::resolve_or_fail(class_name, true, CHECK);
 274   precond(weak_ref_key_class != nullptr);
 275 
 276   log_debug(aot)("Calling WeakReferenceKey::ensureDeterministicAOTCache(Object.class)");
 277   JavaValue result(T_BOOLEAN);
 278   JavaCalls::call_static(&result,
 279                          weak_ref_key_class,
 280                          method_name,
 281                          vmSymbols::void_boolean_signature(),
 282                          CHECK);
 283   assert(result.get_jboolean() == false, "sanity");
 284 }
 285 
// One-time setup before the heap is scanned for archiving: reset the
// archived states of relevant objects, then run the determinism hooks.
// Exceptions propagate to the caller (TRAPS/CHECK).
void HeapShared::prepare_for_archiving(TRAPS) {
  reset_archived_object_states(CHECK);
  ensure_determinism(CHECK);
}
 290 
// Dump-time cache of all archived objects; rebuilt with a GC-safe hash by
// make_archived_object_cache_gc_safe() once copying is complete.
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
// Selects the hashing strategy used by archived_object_cache_hash().
bool HeapShared::_use_identity_hash_for_archived_object_cache = false;
 293 
 294 bool HeapShared::is_archived_heap_in_use() {
 295   if (HeapShared::is_loading()) {
 296     if (HeapShared::is_loading_streaming_mode()) {
 297       return AOTStreamedHeapLoader::is_in_use();
 298     } else {
 299       return AOTMappedHeapLoader::is_in_use();
 300     }
 301   }
 302 
 303   return false;
 304 }
 305 
 306 bool HeapShared::can_use_archived_heap() {
 307   FileMapInfo* static_mapinfo = FileMapInfo::current_info();
 308   if (static_mapinfo == nullptr) {
 309     return false;
 310   }
 311   if (!static_mapinfo->has_heap_region()) {
 312     return false;

 387 
 388 void HeapShared::initialize_streaming() {
 389   assert(is_loading_streaming_mode(), "shouldn't call this");
 390   if (can_use_archived_heap()) {
 391     AOTStreamedHeapLoader::initialize();
 392   }
 393 }
 394 
 395 void HeapShared::enable_gc() {
 396   if (AOTStreamedHeapLoader::is_in_use()) {
 397     AOTStreamedHeapLoader::enable_gc();
 398   }
 399 }
 400 
 401 void HeapShared::materialize_thread_object() {
 402   if (AOTStreamedHeapLoader::is_in_use()) {
 403     AOTStreamedHeapLoader::materialize_thread_object();
 404   }
 405 }
 406 
 407 void HeapShared::archive_interned_string(oop string) {
 408   assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");

 409   bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
 410   assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
 411 }
 412 
 413 void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
 414   if (HeapShared::is_loading()) {
 415     if (HeapShared::is_loading_streaming_mode()) {
 416       // Heap initialization can be done only after vtables are initialized by ReadClosure.
 417       AOTStreamedHeapLoader::finish_initialization(static_mapinfo);
 418     } else {
 419       // Finish up archived heap initialization. These must be
 420       // done after ReadClosure.
 421       AOTMappedHeapLoader::finish_initialization(static_mapinfo);
 422     }
 423   }
 424 }
 425 
// Rebuild _archived_object_cache so its hashing survives GC: the original
// table hashes on raw oop addresses (only valid while objects cannot move);
// the rebuilt table hashes on identity_hash() instead.
void HeapShared::make_archived_object_cache_gc_safe() {
  ArchivedObjectCache* new_cache = new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);

  // It's safe to change the behavior of the hash function now, because iterate_all()
  // doesn't call the hash function.
  _use_identity_hash_for_archived_object_cache = true;

  // Copy all CachedOopInfo into a new table using a different hashing algorithm
  archived_object_cache()->iterate_all([&] (OopHandle oh, CachedOopInfo info) {
      new_cache->put_when_absent(oh, info);
    });

  destroy_archived_object_cache();
  _archived_object_cache = new_cache;
}
 441 
 442 HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
 443   OopHandle oh(Universe::vm_global(), obj);
 444   CachedOopInfo* result = _archived_object_cache->get(oh);
 445   oh.release(Universe::vm_global());
 446   return result;
 447 }
 448 
 449 bool HeapShared::has_been_archived(oop obj) {
 450   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 451   return get_cached_oop_info(obj) != nullptr;
 452 }
 453 
 454 int HeapShared::append_root(oop obj) {
 455   assert(SafepointSynchronize::is_at_safepoint(), "sanity");
 456   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 457   assert(_pending_roots != nullptr, "sanity");
 458 
 459   if (obj == nullptr) {
 460     assert(_pending_roots->at(0) == nullptr, "root index 1 is always null");
 461     return 0;
 462   } else if (CDSConfig::is_dumping_aot_linked_classes()) {
 463     // The AOT compiler may refer the same obj many times, so we
 464     // should use the same index for this oop to avoid excessive entries
 465     // in the roots array.
 466     CachedOopInfo* obj_info = get_cached_oop_info(obj);
 467     assert(obj_info != nullptr, "must be archived");
 468 
 469     if (obj_info->root_index() > 0) {
 470       return obj_info->root_index();
 471     } else {
 472       int i = _pending_roots->append(obj);
 473       obj_info->set_root_index(i);
 474       return i;
 475     }
 476   } else {
 477     return _pending_roots->append(obj);
 478   }
 479 }
 480 
 481 int HeapShared::get_root_index(oop obj) {
 482   if (java_lang_Class::is_instance(obj)) {
 483     obj = scratch_java_mirror(obj);
 484   }
 485 
 486   CachedOopInfo* obj_info = get_cached_oop_info(obj);
 487   const char* error = nullptr;
 488   if (obj_info == nullptr) {
 489     error = "Not a cached oop";
 490   } else if (obj_info->root_index() < 0) {
 491     error = "Not a cached oop root";
 492   } else {
 493     return obj_info->root_index();
 494   }


 495 
 496   ResourceMark rm;
 497   log_debug(aot, codecache, oops)("%s: " INTPTR_FORMAT " (%s)", error,
 498                                   cast_from_oop<uintptr_t>(obj),
 499                                   obj->klass()->external_name());
 500   return -1;
 501 }
 502 
 503 oop HeapShared::get_root(int index, bool clear) {
 504   assert(index >= 0, "sanity");
 505   assert(is_archived_heap_in_use(), "getting roots into heap that is not used");
 506 
 507   oop result;
 508   if (HeapShared::is_loading_streaming_mode()) {
 509     result = AOTStreamedHeapLoader::get_root(index);
 510   } else {
 511     assert(HeapShared::is_loading_mapping_mode(), "must be");
 512     result = AOTMappedHeapLoader::get_root(index);
 513   }
 514 
 515   if (clear) {
 516     clear_root(index);
 517   }
 518 
 519   return result;
 520 }
 521 
 522 void HeapShared::finish_materialize_objects() {
 523   if (AOTStreamedHeapLoader::is_in_use()) {
 524     AOTStreamedHeapLoader::finish_materialize_objects();
 525   }
 526 }
 527 
 528 void HeapShared::clear_root(int index) {
 529   if (CDSConfig::is_using_aot_linked_classes()) {
 530     // When AOT linked classes are in use, all roots will be in use all
 531     // the time, there's no benefit for clearing the roots. Also, we
 532     // can't clear the roots as they can be shared.
 533     return;
 534   }
 535 
 536   assert(index >= 0, "sanity");
 537   assert(CDSConfig::is_using_archive(), "must be");
 538   if (is_archived_heap_in_use()) {
 539     if (log_is_enabled(Debug, aot, heap)) {
 540       log_debug(aot, heap)("Clearing root %d: was %zu", index, p2i(get_root(index, false /* clear */)));
 541     }
 542     if (HeapShared::is_loading_streaming_mode()) {
 543       AOTStreamedHeapLoader::clear_root(index);
 544     } else {
 545       assert(HeapShared::is_loading_mapping_mode(), "must be");
 546       AOTMappedHeapLoader::clear_root(index);
 547     }
 548   }
 549 }
 550 
 551 bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info) {
 552   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 553 
 554   assert(!obj->is_stackChunk(), "do not archive stack chunks");
 555   if (has_been_archived(obj)) {

 596         // returned from jdk.internal.misc.CDS::initializeFromArchive().
 597         // See HeapShared::initialize_from_archived_subgraph().
 598     {
 599       AOTArtifactFinder::add_aot_inited_class(InstanceKlass::cast(k));
 600     }
 601 
 602     if (java_lang_Class::is_instance(obj)) {
 603       Klass* mirror_k = java_lang_Class::as_Klass(obj);
 604       if (mirror_k != nullptr) {
 605         AOTArtifactFinder::add_cached_class(mirror_k);
 606       }
 607     } else if (java_lang_invoke_ResolvedMethodName::is_instance(obj)) {
 608       Method* m = java_lang_invoke_ResolvedMethodName::vmtarget(obj);
 609       if (m != nullptr) {
 610         if (RegeneratedClasses::has_been_regenerated(m)) {
 611           m = RegeneratedClasses::get_regenerated_object(m);
 612         }
 613         InstanceKlass* method_holder = m->method_holder();
 614         AOTArtifactFinder::add_cached_class(method_holder);
 615       }
 616     } else if (AOTCodeCache::is_dumping_code() &&
 617                (java_lang_invoke_MethodHandle::is_instance(obj) || is_interned_string(obj))) {
 618       // Needed by AOT compiler.
 619       append_root(obj);
 620     }
 621   }
 622 
 623   if (log_is_enabled(Debug, aot, heap)) {
 624     ResourceMark rm;
 625     LogTarget(Debug, aot, heap) log;
 626     LogStream out(log);
 627     out.print("Archived heap object " PTR_FORMAT " : %s ",
 628               p2i(obj), obj->klass()->external_name());
 629     if (java_lang_Class::is_instance(obj)) {
 630       Klass* k = java_lang_Class::as_Klass(obj);
 631       if (k != nullptr) {
 632         out.print("%s", k->external_name());
 633       } else {
 634         out.print("primitive");
 635       }
 636     }
 637     out.cr();
 638   }
 639 

 667       remove(ptr);
 668     }
 669   }
 670 };
 671 
 672 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
 673   if (CDSConfig::is_dumping_preimage_static_archive() && scratch_resolved_references(src) != nullptr) {
 674     // We are in AOT training run. The class has been redefined and we are giving it a new resolved_reference.
 675     // Ignore it, as this class will be excluded from the AOT config.
 676     return;
 677   }
 678   if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
 679     _scratch_objects_table->set_oop(src, dest);
 680   }
 681 }
 682 
 683 objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
 684   return (objArrayOop)_scratch_objects_table->get_oop(src);
 685 }
 686 
// Allocate the dump-time tables used while scanning and copying heap objects.
void HeapShared::init_dumping() {
  _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  _pending_roots->append(nullptr); // root index 0 represents a null oop
}
 692 
 693 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
 694   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 695     BasicType bt = (BasicType)i;
 696     if (!is_reference_type(bt)) {
 697       oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
 698       _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
 699     }
 700   }
 701 }
 702 
 703 // Given java_mirror that represents a (primitive or reference) type T,
 704 // return the "scratch" version that represents the same type T. Note
 705 // that java_mirror will be returned if the mirror is already a scratch mirror.
 706 //
 707 // See java_lang_Class::create_scratch_mirror() for more info.
 708 oop HeapShared::scratch_java_mirror(oop java_mirror) {
 709   assert(java_lang_Class::is_instance(java_mirror), "must be");
 710 

 787   if (RegeneratedClasses::is_regenerated_object(ik)) {
 788     InstanceKlass* orig_ik = RegeneratedClasses::get_original_object(ik);
 789     precond(orig_ik->is_initialized());
 790     orig_mirror = orig_ik->java_mirror();
 791   } else {
 792     precond(ik->is_initialized());
 793     orig_mirror = ik->java_mirror();
 794   }
 795 
 796   oop m = scratch_java_mirror(ik);
 797   int nfields = 0;
 798   for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
 799     if (fs.access_flags().is_static()) {
 800       fieldDescriptor& fd = fs.field_descriptor();
 801       int offset = fd.offset();
 802       switch (fd.field_type()) {
 803       case T_OBJECT:
 804       case T_ARRAY:
 805         {
 806           oop field_obj = orig_mirror->obj_field(offset);





 807           m->obj_field_put(offset, field_obj);
 808           if (field_obj != nullptr) {
 809             bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, field_obj);
 810             assert(success, "sanity");
 811           }
 812         }
 813         break;
 814       case T_BOOLEAN:
 815         m->bool_field_put(offset, orig_mirror->bool_field(offset));
 816         break;
 817       case T_BYTE:
 818         m->byte_field_put(offset, orig_mirror->byte_field(offset));
 819         break;
 820       case T_SHORT:
 821         m->short_field_put(offset, orig_mirror->short_field(offset));
 822         break;
 823       case T_CHAR:
 824         m->char_field_put(offset, orig_mirror->char_field(offset));
 825         break;
 826       case T_INT:

 861   // We need to retain the identity_hash, because it may have been used by some hashtables
 862   // in the shared heap.
 863   if (!orig_mirror->fast_no_hash_check()) {
 864     intptr_t src_hash = orig_mirror->identity_hash();
 865     if (UseCompactObjectHeaders) {
 866       narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
 867       scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
 868     } else {
 869       scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
 870     }
 871     assert(scratch_m->mark().is_unlocked(), "sanity");
 872 
 873     DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
 874     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
 875   }
 876 
 877   if (CDSConfig::is_dumping_aot_linked_classes()) {
 878     java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
 879     java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
 880   }
 881 
 882   Klass* k = java_lang_Class::as_Klass(orig_mirror); // is null Universe::void_mirror();
 883   if (CDSConfig::is_dumping_reflection_data() &&
 884       k != nullptr && k->is_instance_klass() &&
 885       java_lang_Class::reflection_data(orig_mirror) != nullptr &&
 886       AOTConstantPoolResolver::can_archive_reflection_data(InstanceKlass::cast(k))) {
 887     java_lang_Class::set_reflection_data(scratch_m, java_lang_Class::reflection_data(orig_mirror));
 888   }
 889 }
 890 
 891 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
 892   if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
 893     objArrayOop rr = src_ik->constants()->resolved_references_or_null();
 894     if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
 895       return HeapShared::scratch_resolved_references(src_ik->constants());
 896     }
 897   }
 898   return nullptr;
 899 }
 900 
 901 int HeapShared::archive_exception_instance(oop exception) {
 902   bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
 903   assert(success, "sanity");
 904   return append_root(exception);
 905 }
 906 
 907 void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
 908   OopHandle oh(&src_obj);

 940                                         p2i((address)G1CollectedHeap::heap()->reserved().end()));
 941     }
 942 
 943     archive_subgraphs();
 944   }
 945 
 946   init_seen_objects_table();
 947   Universe::archive_exception_instances();
 948 }
 949 
// Tear-down after the oop-scanning phase. In mapping mode the shared string
// table is initialized here; the seen-objects table is discarded either way.
void HeapShared::end_scanning_for_oops() {
  if (is_writing_mapping_mode()) {
    StringTable::init_shared_table();
  }
  delete_seen_objects_table();
}
 956 
// Write the archived heap, in either mapped or streamed form.
// Order matters: verify -> write heap -> write subgraph table (inside an
// OtherROAllocMark scope) -> release roots -> make the cache GC-safe.
void HeapShared::write_heap(AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeapInfo* streamed_heap_info) {
  {
    // Verification must not be interrupted by a safepoint, since the tables
    // hold raw oops.
    NoSafepointVerifier nsv;
    if (!SkipArchiveHeapVerification) {
      CDSHeapVerifier::verify();
    }
    check_special_subgraph_classes();
  }

  if (HeapShared::is_writing_mapping_mode()) {
    StringTable::write_shared_table();
    AOTMappedHeapWriter::write(_pending_roots, mapped_heap_info);
  } else {
    assert(HeapShared::is_writing_streaming_mode(), "are there more modes?");
    AOTStreamedHeapWriter::write(_pending_roots, streamed_heap_info);
  }

  ArchiveBuilder::OtherROAllocMark mark;
  write_subgraph_info_table();

  // The roots have been written out; they are no longer needed at dump time.
  delete _pending_roots;
  _pending_roots = nullptr;

  make_archived_object_cache_gc_safe();
}
 982 
 983 void HeapShared::scan_java_mirror(oop orig_mirror) {
 984   oop m = scratch_java_mirror(orig_mirror);
 985   if (m != nullptr) { // nullptr if for custom class loader
 986     copy_java_mirror(orig_mirror, m);
 987     bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
 988     assert(success, "sanity");
 989 
 990     oop extra;
 991     if ((extra = java_lang_Class::reflection_data(m)) != nullptr) {
 992       success = archive_reachable_objects_from(1, _dump_time_special_subgraph, extra);
 993       assert(success, "sanity");
 994     }
 995   }
 996 }
 997 
 998 void HeapShared::scan_java_class(Klass* orig_k) {
 999   scan_java_mirror(orig_k->java_mirror());
1000 
1001   if (orig_k->is_instance_klass()) {
1002     InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
1003     orig_ik->constants()->prepare_resolved_references_for_archiving();
1004     objArrayOop rr = get_archived_resolved_references(orig_ik);
1005     if (rr != nullptr) {
1006       bool success = HeapShared::archive_reachable_objects_from(1, _dump_time_special_subgraph, rr);
1007       assert(success, "must be");
1008     }
1009   }
1010 }
1011 
1012 void HeapShared::archive_subgraphs() {
1013   assert(CDSConfig::is_dumping_heap(), "must be");
1014 

1039                                                   &created);
1040   assert(created, "must not initialize twice");
1041   return info;
1042 }
1043 
1044 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
1045   assert(CDSConfig::is_dumping_heap(), "dump time only");
1046   KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
1047   assert(info != nullptr, "must have been initialized");
1048   return info;
1049 }
1050 
1051 // Add an entry field to the current KlassSubGraphInfo.
1052 void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
1053   assert(CDSConfig::is_dumping_heap(), "dump time only");
1054   if (_subgraph_entry_fields == nullptr) {
1055     _subgraph_entry_fields =
1056       new (mtClass) GrowableArray<int>(10, mtClass);
1057   }
1058   _subgraph_entry_fields->append(static_field_offset);
1059   if (v == nullptr) {
1060     _subgraph_entry_fields->append(-1);
1061   } else {
1062     _subgraph_entry_fields->append(HeapShared::append_root(v));
1063   }
1064 }
1065 
1066 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
1067 // Only objects of boot classes can be included in sub-graph.
1068 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
1069   assert(CDSConfig::is_dumping_heap(), "dump time only");
1070 
1071   if (_subgraph_object_klasses == nullptr) {
1072     _subgraph_object_klasses =
1073       new (mtClass) GrowableArray<Klass*>(50, mtClass);
1074   }
1075 
1076   if (_k == orig_k) {
1077     // Don't add the Klass containing the sub-graph to it's own klass
1078     // initialization list.
1079     return;
1080   }
1081 
1082   if (orig_k->is_instance_klass()) {
1083 #ifdef ASSERT

1385     TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1386     InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1387     assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
1388     resolve_classes_for_subgraph_of(current, k);
1389   }
1390 }
1391 
// Resolve (but do not initialize) the classes of k's archived subgraph.
// Any pending exception is swallowed; if the subgraph record cannot be
// resolved, its archived roots are cleared so they can be collected.
void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
   resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
   CLEAR_PENDING_EXCEPTION;
  }
  if (record == nullptr) {
   clear_archived_roots_of(k);
  }
}
1404 
// Core java.lang.invoke classes that must be resolved and initialized when
// method handles / AOT-linked classes are in play (see
// HeapShared::initialize_java_lang_invoke()).
static const char* java_lang_invoke_core_klasses[] = {
  "java/lang/invoke/Invokers$Holder",
  "java/lang/invoke/MethodHandle",
  "java/lang/invoke/MethodHandleNatives",
  "java/lang/invoke/DirectMethodHandle$Holder",
  "java/lang/invoke/DelegatingMethodHandle$Holder",
  "java/lang/invoke/LambdaForm$Holder",
  "java/lang/invoke/BoundMethodHandle$Species_L",
};
1414 
1415 void HeapShared::initialize_java_lang_invoke(TRAPS) {
1416   if (CDSConfig::is_using_aot_linked_classes() || CDSConfig::is_dumping_method_handles()) {
1417     int len = sizeof(java_lang_invoke_core_klasses)/sizeof(char*);
1418     for (int i = 0; i < len; i++) {
1419       resolve_or_init(java_lang_invoke_core_klasses[i], true, CHECK);
1420     }



1421   }
1422 }
1423 
1424 // Initialize the InstanceKlasses of objects that are reachable from the following roots:
1425 //   - interned strings
1426 //   - Klass::java_mirror() -- including aot-initialized mirrors such as those of Enum klasses.
1427 //   - ConstantPool::resolved_references()
1428 //   - Universe::<xxx>_exception_instance()
1429 //
1430 // For example, if this enum class is initialized at AOT cache assembly time:
1431 //
1432 //    enum Fruit {
1433 //       APPLE, ORANGE, BANANA;
1434 //       static final Set<Fruit> HAVE_SEEDS = new HashSet<>(Arrays.asList(APPLE, ORANGE));
1435 //   }
1436 //
1437 // the aot-initialized mirror of Fruit has a static field that references HashSet, which
1438 // should be initialized before any Java code can access the Fruit class. Note that
1439 // HashSet itself doesn't necessary need to be an aot-initialized class.
1440 void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {

1598       ik->initialize(CHECK);
1599     } else if (k->is_objArray_klass()) {
1600       ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1601       oak->initialize(CHECK);
1602     }
1603   }
1604 }
1605 
1606 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1607   verify_the_heap(k, "before");
1608 
1609   Array<int>* entry_field_records = record->entry_field_records();
1610   if (entry_field_records != nullptr) {
1611     int efr_len = entry_field_records->length();
1612     assert(efr_len % 2 == 0, "sanity");
1613     for (int i = 0; i < efr_len; i += 2) {
1614       int field_offset = entry_field_records->at(i);
1615       int root_index = entry_field_records->at(i+1);
1616       // Load the subgraph entry fields from the record and store them back to
1617       // the corresponding fields within the mirror.
1618       oop v;
1619       if (root_index < 0) {
1620         v = nullptr;
1621       } else {
1622         v = get_root(root_index, /*clear=*/true);
1623       }
1624       oop m = k->java_mirror();
1625       if (k->has_aot_initialized_mirror()) {
1626         assert(v == m->obj_field(field_offset), "must be aot-initialized");
1627       } else {
1628         m->obj_field_put(field_offset, v);
1629       }
1630       log_debug(aot, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
1631     }
1632 
1633     // Done. Java code can see the archived sub-graphs referenced from k's
1634     // mirror after this point.
1635     if (log_is_enabled(Info, aot, heap)) {
1636       ResourceMark rm;
1637       log_info(aot, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s%s",
1638                           k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "",
1639                           k->has_aot_initialized_mirror() ? " (aot-inited)" : "");
1640     }
1641   }
1642 
1643   verify_the_heap(k, "after ");

2011     verify_subgraph_from(f);
2012   }
2013 }
2014 
2015 void HeapShared::verify_subgraph_from(oop orig_obj) {
2016   if (!has_been_archived(orig_obj)) {
2017     // It's OK for the root of a subgraph to be not archived. See comments in
2018     // archive_reachable_objects_from().
2019     return;
2020   }
2021 
2022   // Verify that all objects reachable from orig_obj are archived.
2023   init_seen_objects_table();
2024   verify_reachable_objects_from(orig_obj);
2025   delete_seen_objects_table();
2026 }
2027 
2028 void HeapShared::verify_reachable_objects_from(oop obj) {
2029   _num_total_verifications ++;
2030   if (java_lang_Class::is_instance(obj)) {
2031     Klass* k = java_lang_Class::as_Klass(obj);
2032     if (RegeneratedClasses::has_been_regenerated(k)) {
2033       k = RegeneratedClasses::get_regenerated_object(k);
2034       obj = k->java_mirror();
2035     }
2036     obj = scratch_java_mirror(obj);
2037     assert(obj != nullptr, "must be");
2038   }
2039   if (!has_been_seen_during_subgraph_recording(obj)) {
2040     set_has_been_seen_during_subgraph_recording(obj);
2041     assert(has_been_archived(obj), "must be");
2042     VerifySharedOopClosure walker;
2043     obj->oop_iterate(&walker);
2044   }
2045 }
2046 #endif
2047 
2048 void HeapShared::check_special_subgraph_classes() {
2049   if (CDSConfig::is_dumping_aot_linked_classes()) {
2050     // We can have aot-initialized classes (such as Enums) that can reference objects
2051     // of arbitrary types. Currently, we trust the JEP 483 implementation to only
2052     // aot-initialize classes that are "safe".
2053     //
2054     // TODO: we need an automatic tool that checks the safety of aot-initialized
2055     // classes (when we extend the set of aot-initialized classes beyond JEP 483)

2369                                                   f->offset, f->field_name);
2370     }
2371     done_recording_subgraph(info->klass, klass_name);
2372   }
2373 
2374   log_info(aot, heap)("Archived subgraph records = %zu",
2375                       _num_total_subgraph_recordings);
2376   log_info(aot, heap)("  Walked %zu objects", _num_total_walked_objs);
2377   log_info(aot, heap)("  Archived %zu objects", _num_total_archived_objs);
2378   log_info(aot, heap)("  Recorded %zu klasses", _num_total_recorded_klasses);
2379 
2380 #ifndef PRODUCT
2381   for (int i = 0; fields[i].valid(); i++) {
2382     ArchivableStaticFieldInfo* f = &fields[i];
2383     verify_subgraph_from_static_field(f->klass, f->offset);
2384   }
2385   log_info(aot, heap)("  Verified %zu references", _num_total_verifications);
2386 #endif
2387 }
2388 
2389 bool HeapShared::is_interned_string(oop obj) {
2390   if (!java_lang_String::is_instance(obj)) {
2391     return false;
2392   }
2393 
2394   ResourceMark rm;
2395   int len = 0;
2396   jchar* name = java_lang_String::as_unicode_string_or_null(obj, len);
2397   if (name == nullptr) {
2398     fatal("Insufficient memory for dumping");
2399   }
2400   return StringTable::lookup(name, len) == obj;
2401 }
2402 
2403 bool HeapShared::is_dumped_interned_string(oop o) {
2404   return is_interned_string(o) && has_been_archived(o);
2405 }
2406 
2407 // These tables should be used only within the CDS safepoint, so
2408 // delete them before we exit the safepoint. Otherwise the table will
2409 // contain bad oops after a GC.
2410 void HeapShared::delete_tables_with_raw_oops() {
2411   assert(_seen_objects_table == nullptr, "should have been deleted");
2412 
2413   if (is_writing_mapping_mode()) {
2414     AOTMappedHeapWriter::delete_tables_with_raw_oops();
2415   } else {
2416     assert(is_writing_streaming_mode(), "what other mode?");
2417     AOTStreamedHeapWriter::delete_tables_with_raw_oops();
2418   }
2419 }
2420 
2421 void HeapShared::debug_trace() {
2422   ResourceMark rm;
2423   oop referrer = _object_being_archived.referrer();
2424   if (referrer != nullptr) {
< prev index next >