< prev index next >

src/hotspot/share/cds/heapShared.cpp

Print this page

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 

  25 #include "cds/aotArtifactFinder.hpp"
  26 #include "cds/aotClassInitializer.hpp"
  27 #include "cds/aotClassLocation.hpp"
  28 #include "cds/aotReferenceObjSupport.hpp"
  29 #include "cds/archiveBuilder.hpp"
  30 #include "cds/archiveHeapLoader.hpp"
  31 #include "cds/archiveHeapWriter.hpp"
  32 #include "cds/archiveUtils.hpp"
  33 #include "cds/cdsConfig.hpp"
  34 #include "cds/cdsEnumKlass.hpp"
  35 #include "cds/cdsHeapVerifier.hpp"
  36 #include "cds/heapShared.hpp"
  37 #include "cds/metaspaceShared.hpp"
  38 #include "classfile/classLoaderData.hpp"
  39 #include "classfile/classLoaderExt.hpp"
  40 #include "classfile/javaClasses.inline.hpp"
  41 #include "classfile/modules.hpp"
  42 #include "classfile/stringTable.hpp"
  43 #include "classfile/symbolTable.hpp"
  44 #include "classfile/systemDictionary.hpp"

  71 #include "gc/g1/g1CollectedHeap.hpp"
  72 #endif
  73 
  74 #if INCLUDE_CDS_JAVA_HEAP
  75 
// Describes one static field (in a boot-loader class) whose value is the root of
// an archived object subgraph. The tables below list these fields by class name
// and field name; klass/offset/type start empty and are filled in at dump time.
struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  // A {nullptr, nullptr} entry terminates each table.
  bool valid() {
    return klass_name != nullptr;
  }
};
  90 











// Interned strings collected for the shared string table during dumping.
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

// Per-slot allocation statistics gathered while archiving heap objects.
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
// Test-only support for -XX:ArchiveHeapTestClass.
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif
 105 
 106 
 107 //
 108 // If you add new entries to the following tables, you should know what you're doing!
 109 //
 110 
// Static fields (all in boot-loader classes) whose values are archived as
// subgraph roots in every heap archive.
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",              "archivedCache"},
  {"java/lang/Long$LongCache",                    "archivedCache"},
  {"java/lang/Byte$ByteCache",                    "archivedCache"},
  {"java/lang/Short$ShortCache",                  "archivedCache"},
  {"java/lang/Character$CharacterCache",          "archivedCache"},
  {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
  {"java/util/ImmutableCollections",              "archivedObjects"},
  {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
  {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",              "archivedCaches"},

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};
 130 
// full module graph
// Additional subgraph roots that are archived only when the full module graph
// is being dumped.
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS,                     ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData",               "archivedData"},
  {nullptr, nullptr},
};
 138 
// The "special" subgraph holds archived objects that don't belong to any
// entry-field subgraph (e.g. scratch mirrors); dump-time and run-time forms.
KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
// Roots appended by append_root() in the VM thread while dumping.
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
// Archived roots, split into segments of at most _root_segment_max_size_elems.
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
 146 
 147 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
 148   for (int i = 0; fields[i].valid(); i++) {
 149     if (fields[i].klass == ik) {
 150       return true;
 151     }
 152   }
 153   return false;
 154 }
 155 
 156 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
 157   return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
 158          is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
 159 }
 160 
 161 unsigned HeapShared::oop_hash(oop const& p) {

 209                          CHECK);
 210   Handle boot_loader(THREAD, result.get_oop());
 211   reset_states(boot_loader(), CHECK);
 212 }
 213 
// Maps each archived source object to its CachedOopInfo; created by
// create_archived_object_cache() at the start of scanning.
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
 215 
 216 bool HeapShared::has_been_archived(oop obj) {
 217   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 218   return archived_object_cache()->get(obj) != nullptr;
 219 }
 220 
// Appends obj (which must already be archived, or null) to the list of archive
// roots and returns its index. VM-thread only: _pending_roots holds naked oops,
// so no GC may move them while the list is being built.
int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (obj != nullptr) {
    assert(has_been_archived(obj), "must be");
  }
  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  return _pending_roots->append(obj);
}
 231 
 232 objArrayOop HeapShared::root_segment(int segment_idx) {
 233   if (CDSConfig::is_dumping_heap()) {
 234     assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 235   } else {
 236     assert(CDSConfig::is_using_archive(), "must be");
 237   }
 238 
 239   objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
 240   assert(segment != nullptr, "should have been initialized");
 241   return segment;
 242 }
 243 








































































































































































 244 void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
 245   assert(_root_segment_max_size_elems > 0, "sanity");
 246 
 247   // Try to avoid divisions for the common case.
 248   if (idx < _root_segment_max_size_elems) {
 249     seg_idx = 0;
 250     int_idx = idx;
 251   } else {
 252     seg_idx = idx / _root_segment_max_size_elems;
 253     int_idx = idx % _root_segment_max_size_elems;
 254   }
 255 
 256   assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
 257          "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
 258 }
 259 
 260 // Returns an objArray that contains all the roots of the archived objects
 261 oop HeapShared::get_root(int index, bool clear) {
 262   assert(index >= 0, "sanity");
 263   assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");

 388     OopHandle* handle = get(ptr);
 389     if (handle != nullptr) {
 390       handle->release(Universe::vm_global());
 391       remove(ptr);
 392     }
 393   }
 394 };
 395 
 396 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
 397   if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
 398     _scratch_objects_table->set_oop(src, dest);
 399   }
 400 }
 401 
// Returns the scratch resolved-references array previously recorded for src
// (see add_scratch_resolved_references).
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_objects_table->get_oop(src);
}
 405 
// One-time setup for heap dumping: the scratch-object table and the pending
// root list (initial capacity 500).
void HeapShared::init_dumping() {
  _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
}
 410 
 411 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
 412   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 413     BasicType bt = (BasicType)i;
 414     if (!is_reference_type(bt)) {
 415       oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
 416       _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);

 417     }
 418   }
 419 }
 420 
 421 // Given java_mirror that represents a (primitive or reference) type T,
 422 // return the "scratch" version that represents the same type T.
 423 // Note that if java_mirror will be returned if it's already a
 424 // scratch mirror.
 425 //
 426 // See java_lang_Class::create_scratch_mirror() for more info.
 427 oop HeapShared::scratch_java_mirror(oop java_mirror) {
 428   assert(java_lang_Class::is_instance(java_mirror), "must be");
 429 
 430   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 431     BasicType bt = (BasicType)i;
 432     if (!is_reference_type(bt)) {
 433       if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
 434         return java_mirror;
 435       }
 436     }
 437   }
 438 
 439   if (java_lang_Class::is_primitive(java_mirror)) {
 440     return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
 441   } else {
 442     return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
 443   }
 444 }
 445 
// Returns the scratch mirror for primitive type t (created by
// init_scratch_objects_for_basic_type_mirrors).
oop HeapShared::scratch_java_mirror(BasicType t) {
  assert((uint)t < T_VOID+1, "range check");
  assert(!is_reference_type(t), "sanity");
  return _scratch_basic_type_mirrors[t].resolve();
}
 451 
// Returns the scratch mirror recorded for k via set_scratch_java_mirror().
oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_objects_table->get_oop(k);
}
 455 
// Records mirror as the scratch mirror for k.
void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  _scratch_objects_table->set_oop(k, mirror);
}
 459 
void HeapShared::remove_scratch_objects(Klass* k) {
  // Klass is being deallocated. Java mirror can still be alive, and it should not
  // point to dead klass. We need to break the link from mirror to the Klass.
  // See how InstanceKlass::deallocate_contents does it for normal mirrors.
  oop mirror = _scratch_objects_table->get_oop(k);
  if (mirror != nullptr) {
    java_lang_Class::set_klass(mirror, nullptr);
  }
  _scratch_objects_table->remove_oop(k);
  if (k->is_instance_klass()) {
    // Also drop the entry keyed by k's constant pool (the scratch
    // resolved-references array recorded in add_scratch_resolved_references).
    _scratch_objects_table->remove(InstanceKlass::cast(k)->constants());
  }
}
 473 
 474 //TODO: we eventually want a more direct test for these kinds of things.
 475 //For example the JVM could record some bit of context from the creation
 476 //of the klass, such as who called the hidden class factory.  Using
 477 //string compares on names is fragile and will break as soon as somebody
 478 //changes the names in the JDK code.  See discussion in JDK-8342481 for
 479 //related ideas about marking AOT-related classes.
 480 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
 481   return ik->is_hidden() &&
 482     (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
 483      ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
 484      ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
 485      ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
 486 }
 487 
 488 bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
 489   return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
 490 }
 491 

 635   assert(info != nullptr, "must be");
 636   has_oop_pointers = info->has_oop_pointers();
 637   has_native_pointers = info->has_native_pointers();
 638 }
 639 
 640 void HeapShared::set_has_native_pointers(oop src_obj) {
 641   CachedOopInfo* info = archived_object_cache()->get(src_obj);
 642   assert(info != nullptr, "must be");
 643   info->set_has_native_pointers();
 644 }
 645 
// Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
// should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
void HeapShared::start_scanning_for_oops() {
  {
    // No safepoint may occur while the subgraph info and object cache are set up.
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (UseCompressedOops || UseG1GC) {
      // Log the heap range that archived oops will be relative to.
      log_info(cds)("Heap range = [" PTR_FORMAT " - "  PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }

    archive_subgraphs();
  }

  init_seen_objects_table();
  Universe::archive_exception_instances();
}
 673 
// Finishes the scanning phase: archive the interned strings discovered during
// the scan, then tear down the seen-objects table.
void HeapShared::end_scanning_for_oops() {
  archive_strings();
  delete_seen_objects_table();
}
 678 
// Writes the scanned heap objects into heap_info: verify first, then emit the
// shared string table, the root list, and finally the subgraph-info table.
void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
  {
    // Verification must complete without an intervening safepoint.
    NoSafepointVerifier nsv;
    CDSHeapVerifier::verify();
    check_special_subgraph_classes();
  }

  StringTable::write_shared_table(_dumped_interned_strings);
  ArchiveHeapWriter::write(_pending_roots, heap_info);

  // The subgraph-info table is allocated via the RO-region allocation mark.
  ArchiveBuilder::OtherROAllocMark mark;
  write_subgraph_info_table();
}
 692 
 693 void HeapShared::scan_java_mirror(oop orig_mirror) {
 694   oop m = scratch_java_mirror(orig_mirror);
 695   if (m != nullptr) { // nullptr if for custom class loader
 696     copy_java_mirror_hashcode(orig_mirror, m);
 697     bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
 698     assert(success, "sanity");
 699   }
 700 }
 701 
 702 void HeapShared::scan_java_class(Klass* orig_k) {
 703   scan_java_mirror(orig_k->java_mirror());
 704 
 705   if (orig_k->is_instance_klass()) {
 706     InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
 707     orig_ik->constants()->prepare_resolved_references_for_archiving();

1075                           which, k->external_name());
1076       FlagSetting fs1(VerifyBeforeGC, true);
1077       FlagSetting fs2(VerifyDuringGC, true);
1078       FlagSetting fs3(VerifyAfterGC,  true);
1079       Universe::heap()->collect(GCCause::_java_lang_system_gc);
1080     }
1081   }
1082 }
1083 
1084 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
1085 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
1086 //
1087 // Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
1088 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
1089 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
1090 void HeapShared::resolve_classes(JavaThread* current) {
1091   assert(CDSConfig::is_using_archive(), "runtime only!");
1092   if (!ArchiveHeapLoader::is_in_use()) {
1093     return; // nothing to do
1094   }













1095   resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
1096   resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
1097 }
1098 
1099 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1100   for (int i = 0; fields[i].valid(); i++) {
1101     ArchivableStaticFieldInfo* info = &fields[i];
1102     TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1103     InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1104     assert(k != nullptr && k->is_shared_boot_class(), "sanity");
1105     resolve_classes_for_subgraph_of(current, k);
1106   }
1107 }
1108 
1109 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1110   JavaThread* THREAD = current;
1111   ExceptionMark em(THREAD);
1112   const ArchivedKlassSubGraphInfoRecord* record =
1113    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1114   if (HAS_PENDING_EXCEPTION) {

1450 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
1451   PointsToOopsChecker points_to_oops_checker;
1452   obj->oop_iterate(&points_to_oops_checker);
1453   return CachedOopInfo(referrer, points_to_oops_checker.result());
1454 }
1455 
// Eagerly initializes all the primitive box classes (plus Void) when an
// archived heap is in use — presumably so their archived instances/caches are
// usable immediately; confirm against callers.
void HeapShared::init_box_classes(TRAPS) {
  if (ArchiveHeapLoader::is_in_use()) {
    vmClasses::Boolean_klass()->initialize(CHECK);
    vmClasses::Character_klass()->initialize(CHECK);
    vmClasses::Float_klass()->initialize(CHECK);
    vmClasses::Double_klass()->initialize(CHECK);
    vmClasses::Byte_klass()->initialize(CHECK);
    vmClasses::Short_klass()->initialize(CHECK);
    vmClasses::Integer_klass()->initialize(CHECK);
    vmClasses::Long_klass()->initialize(CHECK);
    vmClasses::Void_klass()->initialize(CHECK);
  }
}
1469 














// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all objects that are reachable from orig_obj (including those that
//     were already archived when this function is called)
bool HeapShared::archive_reachable_objects_from(int level,
                                                KlassSubGraphInfo* subgraph_info,
                                                oop orig_obj) {
  assert(orig_obj != nullptr, "must be");
  // Iterative traversal with an explicit stack (avoids deep native recursion).
  PendingOopStack stack;
  stack.push(PendingOop(orig_obj, nullptr, level));

  while (stack.length() > 0) {
    PendingOop po = stack.pop();
    // Remember the object currently being processed, for diagnostics.
    _object_being_archived = po;
    bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
    _object_being_archived = PendingOop();

    if (!status) {
      // Don't archive a subgraph root that's too big. For archived static fields, that's OK
      // as the Java code will take care of initializing this field dynamically.
      assert(level == 1, "VM should have exited with unarchivable objects for _level > 1");
      return false;
    }
  }

  return true;
}
1498 
1499 bool HeapShared::walk_one_object(PendingOopStack* stack, int level, KlassSubGraphInfo* subgraph_info,
1500                                  oop orig_obj, oop referrer) {
1501   assert(orig_obj != nullptr, "must be");
1502   if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
1503     // This object has injected fields that cannot be supported easily, so we disallow them for now.
1504     // If you get an error here, you probably made a change in the JDK library that has added
1505     // these objects that are referenced (directly or indirectly) by static fields.
1506     ResourceMark rm;
1507     log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
1508     debug_trace();
1509     MetaspaceShared::unrecoverable_writing_error();
1510   }
1511 
1512   if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
1513     ResourceMark rm;
1514     LogTarget(Debug, cds, heap) log;
1515     LogStream out(log);
1516     out.print("Found java mirror " PTR_FORMAT " ", p2i(orig_obj));
1517     Klass* k = java_lang_Class::as_Klass(orig_obj);
1518     if (k != nullptr) {
1519       out.print("%s", k->external_name());
1520     } else {
1521       out.print("primitive");
1522     }
1523     out.print_cr("; scratch mirror = "  PTR_FORMAT,
1524                  p2i(scratch_java_mirror(orig_obj)));
1525   }
1526 
1527   if (CDSConfig::is_initing_classes_at_dump_time()) {
1528     if (java_lang_Class::is_instance(orig_obj)) {
1529       orig_obj = scratch_java_mirror(orig_obj);

1565 
1566   bool already_archived = has_been_archived(orig_obj);
1567   bool record_klasses_only = already_archived;
1568   if (!already_archived) {
1569     ++_num_new_archived_objs;
1570     if (!archive_object(orig_obj, referrer, subgraph_info)) {
1571       // Skip archiving the sub-graph referenced from the current entry field.
1572       ResourceMark rm;
1573       log_error(cds, heap)(
1574         "Cannot archive the sub-graph referenced from %s object ("
1575         PTR_FORMAT ") size %zu, skipped.",
1576         orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1577       if (level == 1) {
1578         // Don't archive a subgraph root that's too big. For archives static fields, that's OK
1579         // as the Java code will take care of initializing this field dynamically.
1580         return false;
1581       } else {
1582         // We don't know how to handle an object that has been archived, but some of its reachable
1583         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1584         // we have a real use case.
1585         MetaspaceShared::unrecoverable_writing_error();
1586       }
1587     }
1588   }
1589 
1590   Klass *orig_k = orig_obj->klass();
1591   subgraph_info->add_subgraph_object_klass(orig_k);
1592 
1593   {
1594     // Find all the oops that are referenced by orig_obj, push them onto the stack
1595     // so we can work on them next.
1596     ResourceMark rm;
1597     OopFieldPusher pusher(stack, level, record_klasses_only, subgraph_info, orig_obj);
1598     orig_obj->oop_iterate(&pusher);
1599   }
1600 
1601   if (CDSConfig::is_initing_classes_at_dump_time()) {
1602     // The enum klasses are archived with aot-initialized mirror.
1603     // See AOTClassInitializer::can_archive_initialized_mirror().
1604   } else {
1605     if (CDSEnumKlass::is_enum_obj(orig_obj)) {

2019 
// Archives every subgraph rooted at an entry of fields[], grouping consecutive
// entries of the same class into one recording pass, and logs statistics.
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  // Reset the statistics that are logged at the end of this function.
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the class of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  // Note: the outer loop does NOT advance i; the inner loop increments i past
  // every consecutive entry that shares the same klass_name.
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      // Pointer comparison: consecutive same-class entries reuse the same literal.
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records = %d",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  // Re-verify each archived subgraph from its root field.
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "cds/aotCacheAccess.hpp"
  26 #include "cds/aotArtifactFinder.hpp"
  27 #include "cds/aotClassInitializer.hpp"
  28 #include "cds/aotClassLocation.hpp"
  29 #include "cds/aotReferenceObjSupport.hpp"
  30 #include "cds/archiveBuilder.hpp"
  31 #include "cds/archiveHeapLoader.hpp"
  32 #include "cds/archiveHeapWriter.hpp"
  33 #include "cds/archiveUtils.hpp"
  34 #include "cds/cdsConfig.hpp"
  35 #include "cds/cdsEnumKlass.hpp"
  36 #include "cds/cdsHeapVerifier.hpp"
  37 #include "cds/heapShared.hpp"
  38 #include "cds/metaspaceShared.hpp"
  39 #include "classfile/classLoaderData.hpp"
  40 #include "classfile/classLoaderExt.hpp"
  41 #include "classfile/javaClasses.inline.hpp"
  42 #include "classfile/modules.hpp"
  43 #include "classfile/stringTable.hpp"
  44 #include "classfile/symbolTable.hpp"
  45 #include "classfile/systemDictionary.hpp"

  72 #include "gc/g1/g1CollectedHeap.hpp"
  73 #endif
  74 
  75 #if INCLUDE_CDS_JAVA_HEAP
  76 
// Describes one static field (in a boot-loader class) whose value is the root of
// an archived object subgraph. The tables below list these fields by class name
// and field name; klass/offset/type start empty and are filled in at dump time.
struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  // A {nullptr, nullptr} entry terminates each table.
  bool valid() {
    return klass_name != nullptr;
  }
};
  91 
// RAII helper: pushes a description string onto HeapShared::_context for the
// duration of a scope, and pops it on exit. The embedded ResourceMark bounds
// any resource-area allocations made while the mark is active.
class HeapShared::ContextMark : public StackObj {
  ResourceMark rm;
public:
  ContextMark(const char* c) : rm{} {
    _context->push(c);
  }
  ~ContextMark() {
    _context->pop();
  }
};
 102 
// Interned strings collected for the shared string table during dumping.
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

// Per-slot allocation statistics gathered while archiving heap objects.
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
// Test-only support for -XX:ArchiveHeapTestClass.
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif
 117 
 118 
 119 //
 120 // If you add new entries to the following tables, you should know what you're doing!
 121 //
 122 
// Static fields (all in boot-loader classes) whose values are archived as
// subgraph roots in every heap archive.
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",              "archivedCache"},
  {"java/lang/Long$LongCache",                    "archivedCache"},
  {"java/lang/Byte$ByteCache",                    "archivedCache"},
  {"java/lang/Short$ShortCache",                  "archivedCache"},
  {"java/lang/Character$CharacterCache",          "archivedCache"},
  {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
  {"java/util/ImmutableCollections",              "archivedObjects"},
  {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
  {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
  {"java/lang/reflect/Proxy$ProxyBuilder",        "archivedData"},    // FIXME -- requires AOTClassLinking

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};
 143 
// full module graph
// Additional subgraph roots that are archived only when the full module graph
// is being dumped.
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS,                     ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData",               "archivedData"},
  {nullptr, nullptr},
};
 151 
// The "special" subgraph holds archived objects that don't belong to any
// entry-field subgraph; dump-time and run-time forms.
KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
// Roots appended by append_root(); each root is held in a global OopHandle.
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
// Stack of context strings maintained by HeapShared::ContextMark for diagnostics.
GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
// Archived roots, split into segments of at most _root_segment_max_size_elems.
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
 160 
 161 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
 162   for (int i = 0; fields[i].valid(); i++) {
 163     if (fields[i].klass == ik) {
 164       return true;
 165     }
 166   }
 167   return false;
 168 }
 169 
 170 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
 171   return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
 172          is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
 173 }
 174 
 175 unsigned HeapShared::oop_hash(oop const& p) {

 223                          CHECK);
 224   Handle boot_loader(THREAD, result.get_oop());
 225   reset_states(boot_loader(), CHECK);
 226 }
 227 
// Maps each archived source object to its CachedOopInfo during dumping.
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
 229 
 230 bool HeapShared::has_been_archived(oop obj) {
 231   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 232   return archived_object_cache()->get(obj) != nullptr;
 233 }
 234 
// Records obj as a root of the archived heap and returns its root index.
// obj, if non-null, must already have been archived. Dump time, VM thread only.
int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (obj != nullptr) {
    assert(has_been_archived(obj), "must be");
  }
  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  // Hold obj in a global OopHandle so it stays alive until the heap is written.
  OopHandle oh(Universe::vm_global(), obj);
  return _pending_roots->append(oh);
}
 246 
// Returns the objArray holding the roots of the given segment.
// Usable at dump time (VM thread only) and at runtime when an archive is in use.
objArrayOop HeapShared::root_segment(int segment_idx) {
  if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  } else {
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}
 258 
 259 inline unsigned int oop_handle_hash(const OopHandle& oh) {
 260   oop o = oh.resolve();
 261   if (o == nullptr) {
 262     return 0;
 263   } else {
 264     return o->identity_hash();
 265   }
 266 }
 267 
// Equality for OopHandle hashtable keys: two handles are equal iff they
// refer to the same object (or are both null).
inline bool oop_handle_equals(const OopHandle& a, const OopHandle& b) {
  return a.resolve() == b.resolve();
}
 271 
// Maps a dump-time ("original") heap object to its "scratch" copy.
// Keys and values are global OopHandles; see track_scratch_object().
class OrigToScratchObjectTable: public ResourceHashtable<OopHandle, OopHandle,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

// Created lazily by track_scratch_object(); guarded by ArchivedObjectTables_lock.
static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;
 280 
// Records the orig_obj -> scratch_obj mapping so the scratch copy can later be
// looked up via orig_to_scratch_object(). Creates the table on first use.
void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
  if (_orig_to_scratch_object_table == nullptr) {
    _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
  }

  // Both key and value are held in global OopHandles to keep them GC-safe.
  OopHandle orig_h(Universe::vm_global(), orig_obj);
  OopHandle scratch_h(Universe::vm_global(), scratch_obj);
  _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
}
 291 
 292 oop HeapShared::orig_to_scratch_object(oop orig_obj) {
 293   MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
 294   if (_orig_to_scratch_object_table != nullptr) {
 295     OopHandle orig(&orig_obj);
 296     OopHandle* v = _orig_to_scratch_object_table->get(orig);
 297     if (v != nullptr) {
 298       return v->resolve();
 299     }
 300   }
 301   return nullptr;
 302 }
 303 
// Permanent oops are used to support AOT-compiled methods, which may have in-line references
// to Strings and MH oops.
//
// At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
// and are accessed via AOTCacheAccess::get_archived_object(int).
struct PermanentOopInfo {
  int _index;       // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
  int _heap_offset; // Offset of the object from the bottom of the archived heap.
  PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
};
 314 
// Maps an archived heap object (keyed by OopHandle) to its PermanentOopInfo.
class PermanentOopTable: public ResourceHashtable<OopHandle, PermanentOopInfo,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

static int _dumptime_permanent_oop_count = 0;            // number of permanent indices assigned so far (dump time)
static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;  // created lazily by add_to_permanent_oop_table()
static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr; // runtime: indexed by permanent index
 325 
// ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
// so we can remember their offset (from the bottom of the archived heap).
void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
  assert_at_safepoint();
  if (_dumptime_permanent_oop_table == nullptr) {
    _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
  }

  // _index stays -1 until get_archived_object_permanent_index() is called on obj.
  PermanentOopInfo info(-1, offset);
  OopHandle oh(Universe::vm_global(), obj);
  _dumptime_permanent_oop_table->put_when_absent(oh, info);
}
 338 
// A permanent index is assigned to an archived object ONLY when
// the AOT compiler calls this function.
// Returns -1 if obj is not an archived object (or we are not dumping the heap).
int HeapShared::get_archived_object_permanent_index(oop obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);

  if (!CDSConfig::is_dumping_heap()) {
    return -1; // Called by the Leyden old workflow
  }
  if (_dumptime_permanent_oop_table == nullptr) {
    return -1;
  }

  // If obj is an "original" object, substitute its scratch copy, since the
  // permanent-oop table is keyed by the objects actually being archived.
  if (_orig_to_scratch_object_table != nullptr) {
    OopHandle orig(&obj);
    OopHandle* v = _orig_to_scratch_object_table->get(orig);
    if (v != nullptr) {
      obj = v->resolve();
    }
  }

  OopHandle tmp(&obj);
  PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
  if (info == nullptr) {
    return -1;
  } else {
    // Assign the next permanent index on first request for this object.
    if (info->_index < 0) {
      info->_index = _dumptime_permanent_oop_count++;
    }
    return info->_index;
  }
}
 370 
// Runtime: returns the archived object that was assigned the given permanent
// index at dump time. See CachedCodeDirectoryInternal::runtime_init_internal().
oop HeapShared::get_archived_object(int permanent_index) {
  assert(permanent_index >= 0, "sanity");
  assert(ArchiveHeapLoader::is_in_use(), "sanity");
  assert(_runtime_permanent_oops != nullptr, "sanity");

  return _runtime_permanent_oops->at(permanent_index).resolve();
}
 378 
 379 // Remember all archived heap objects that have a permanent index.
 380 //   table[i] = offset of oop whose permanent index is i.
 381 void CachedCodeDirectoryInternal::dumptime_init_internal() {
 382   const int count = _dumptime_permanent_oop_count;
 383   if (count == 0) {
 384     // Avoid confusing CDS code with zero-sized tables, just return.
 385     log_info(cds)("No permanent oops");
 386     _permanent_oop_count = count;
 387     _permanent_oop_offsets = nullptr;
 388     return;
 389   }
 390 
 391   int* table = (int*)AOTCacheAccess::allocate_aot_code_region(count * sizeof(int));
 392   for (int i = 0; i < count; i++) {
 393     table[count] = -1;
 394   }
 395   _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
 396     int index = info._index;
 397     if (index >= 0) {
 398       assert(index < count, "sanity");
 399       table[index] = info._heap_offset;
 400     }
 401     return true; // continue
 402   });
 403 
 404   for (int i = 0; i < count; i++) {
 405     assert(table[i] >= 0, "must be");
 406   }
 407 
 408   log_info(cds)("Dumped %d permanent oops", count);
 409 
 410   _permanent_oop_count = count;
 411   AOTCacheAccess::set_pointer(&_permanent_oop_offsets, table);
 412 }
 413 
 414 // This is called during the bootstrap of the production run, before any GC can happen.
 415 // Record each permanent oop in a OopHandle for GC safety.
 416 void CachedCodeDirectoryInternal::runtime_init_internal() {
 417   int count = _permanent_oop_count;
 418   int* table = _permanent_oop_offsets;
 419   _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
 420   for (int i = 0; i < count; i++) {
 421     oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
 422     OopHandle oh(Universe::vm_global(), obj);
 423     _runtime_permanent_oops->append(oh);
 424   }
 425 };
 426 
 427 void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
 428   assert(_root_segment_max_size_elems > 0, "sanity");
 429 
 430   // Try to avoid divisions for the common case.
 431   if (idx < _root_segment_max_size_elems) {
 432     seg_idx = 0;
 433     int_idx = idx;
 434   } else {
 435     seg_idx = idx / _root_segment_max_size_elems;
 436     int_idx = idx % _root_segment_max_size_elems;
 437   }
 438 
 439   assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
 440          "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
 441 }
 442 
 443 // Returns an objArray that contains all the roots of the archived objects
 444 oop HeapShared::get_root(int index, bool clear) {
 445   assert(index >= 0, "sanity");
 446   assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");

 571     OopHandle* handle = get(ptr);
 572     if (handle != nullptr) {
 573       handle->release(Universe::vm_global());
 574       remove(ptr);
 575     }
 576   }
 577 };
 578 
// Records the scratch resolved-references array for src's constant pool.
// Only done for classes of builtin loaders; others are not archived.
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
    _scratch_objects_table->set_oop(src, dest);
  }
}
 584 
// Returns the scratch resolved-references array recorded for src, or nullptr.
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_objects_table->get_oop(src);
}
 588 
// Allocates the dump-time tables used while archiving heap objects.
void HeapShared::init_dumping() {
  _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
}
 593 
// Creates a scratch mirror for every primitive type and records the mapping
// from the "real" primitive mirror to its scratch copy.
void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
      track_scratch_object(Universe::java_mirror(bt), m);
    }
  }
}
 604 
// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that java_mirror itself will be returned if it's already a
// scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {
  assert(java_lang_Class::is_instance(java_mirror), "must be");

  // If java_mirror is already one of the scratch basic-type mirrors, return it as-is.
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
        return java_mirror;
      }
    }
  }

  // Otherwise, look up the scratch mirror via the type the mirror represents.
  if (java_lang_Class::is_primitive(java_mirror)) {
    return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
  } else {
    return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
  }
}
 629 
// Returns the scratch mirror for a primitive type t.
oop HeapShared::scratch_java_mirror(BasicType t) {
  assert((uint)t < T_VOID+1, "range check");
  assert(!is_reference_type(t), "sanity");
  return _scratch_basic_type_mirrors[t].resolve();
}
 635 
// Returns the scratch mirror recorded for klass k, or nullptr if none.
oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_objects_table->get_oop(k);
}
 639 
// Records mirror as the scratch mirror for k, and also tracks the
// original-mirror -> scratch-mirror mapping.
void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  track_scratch_object(k->java_mirror(), mirror);
  _scratch_objects_table->set_oop(k, mirror);
}
 644 
void HeapShared::remove_scratch_objects(Klass* k) {
  // Klass is being deallocated. Java mirror can still be alive, and it should not
  // point to dead klass. We need to break the link from mirror to the Klass.
  // See how InstanceKlass::deallocate_contents does it for normal mirrors.
  oop mirror = _scratch_objects_table->get_oop(k);
  if (mirror != nullptr) {
    java_lang_Class::set_klass(mirror, nullptr);
  }
  _scratch_objects_table->remove_oop(k);
  if (k->is_instance_klass()) {
    // Also drop the scratch resolved-references array keyed by k's constant pool.
    _scratch_objects_table->remove(InstanceKlass::cast(k)->constants());
  }
  if (mirror != nullptr) {
    // Also unlink the scratch copy of the mirror, if one was tracked.
    // NOTE(review): _orig_to_scratch_object_table is dereferenced without a null
    // check here; this appears safe because a non-null mirror implies a prior
    // set_scratch_java_mirror() call, which creates the table via
    // track_scratch_object() — confirm this invariant holds on all paths.
    OopHandle tmp(&mirror);
    OopHandle* v = _orig_to_scratch_object_table->get(tmp);
    if (v != nullptr) {
      oop scratch_mirror = v->resolve();
      java_lang_Class::set_klass(scratch_mirror, nullptr);
      _orig_to_scratch_object_table->remove(tmp);
    }
  }
}
 667 
//TODO: we eventually want a more direct test for these kinds of things.
//For example the JVM could record some bit of context from the creation
//of the klass, such as who called the hidden class factory.  Using
//string compares on names is fragile and will break as soon as somebody
//changes the names in the JDK code.  See discussion in JDK-8342481 for
//related ideas about marking AOT-related classes.

// Returns true if ik is a hidden class generated for a LambdaForm
// (method handle / direct method handle / bound method handle / var handle).
bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
  return ik->is_hidden() &&
    (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
     ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
     ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
     ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
}
 681 
// Returns true if ik is a hidden class generated for a lambda proxy
// (its name contains the "$$Lambda+" infix, not at position 0).
bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
  return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
}
 685 

 829   assert(info != nullptr, "must be");
 830   has_oop_pointers = info->has_oop_pointers();
 831   has_native_pointers = info->has_native_pointers();
 832 }
 833 
// Marks src_obj (which must already be in the archived-object cache) as
// containing native (Metaspace) pointers.
void HeapShared::set_has_native_pointers(oop src_obj) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}
 839 
// Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
// should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
void HeapShared::start_scanning_for_oops() {
  {
    // Object addresses must stay stable while we set up the caches and
    // scan the entry-field subgraphs.
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
    _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (UseCompressedOops || UseG1GC) {
      log_info(cds)("Heap range = [" PTR_FORMAT " - "  PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }

    archive_subgraphs();
  }

  init_seen_objects_table();
  Universe::archive_exception_instances();
}
 868 
// Finishes the scanning phase: archives the interned strings found so far
// and tears down the seen-objects table created by start_scanning_for_oops().
void HeapShared::end_scanning_for_oops() {
  archive_strings();
  delete_seen_objects_table();
}
 873 
// Verifies the discovered object graphs and writes the archived heap
// (interned strings, roots, subgraph table) into heap_info.
void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
  {
    NoSafepointVerifier nsv;
    if (!SkipArchiveHeapVerification) {
      CDSHeapVerifier::verify();
    }
    check_special_subgraph_classes();
  }

  StringTable::write_shared_table(_dumped_interned_strings);
  // Resolve the pending root OopHandles into plain oops for the writer.
  GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
  for (int i = 0; i < _pending_roots->length(); i++) {
    roots->append(_pending_roots->at(i).resolve());
  }
  ArchiveHeapWriter::write(roots, heap_info);
  delete roots;

  ArchiveBuilder::OtherROAllocMark mark;
  write_subgraph_info_table();
}
 894 
// Archives the scratch copy of orig_mirror (and everything reachable from it)
// into the special subgraph.
void HeapShared::scan_java_mirror(oop orig_mirror) {
  oop m = scratch_java_mirror(orig_mirror);
  if (m != nullptr) { // nullptr for mirrors of classes of custom class loaders
    copy_java_mirror_hashcode(orig_mirror, m);
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
    assert(success, "sanity");
  }
}
 903 
 904 void HeapShared::scan_java_class(Klass* orig_k) {
 905   scan_java_mirror(orig_k->java_mirror());
 906 
 907   if (orig_k->is_instance_klass()) {
 908     InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
 909     orig_ik->constants()->prepare_resolved_references_for_archiving();

1277                           which, k->external_name());
1278       FlagSetting fs1(VerifyBeforeGC, true);
1279       FlagSetting fs2(VerifyDuringGC, true);
1280       FlagSetting fs3(VerifyAfterGC,  true);
1281       Universe::heap()->collect(GCCause::_java_lang_system_gc);
1282     }
1283   }
1284 }
1285 
// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }

  // Without AOT-linked classes, the classes of the special subgraph must be
  // resolved explicitly here.
  if (!CDSConfig::is_using_aot_linked_classes()) {
    assert( _run_time_special_subgraph != nullptr, "must be");
    Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
    if (klasses != nullptr) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        ExceptionMark em(current); // no exception can happen here
        resolve_or_init(k, /*do_init*/false, current);
      }
    }
  }

  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
1313 
// Resolves the subgraph classes for every entry-field class in fields[].
void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != nullptr && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}
1323 
1324 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1325   JavaThread* THREAD = current;
1326   ExceptionMark em(THREAD);
1327   const ArchivedKlassSubGraphInfoRecord* record =
1328    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1329   if (HAS_PENDING_EXCEPTION) {

// Builds the CachedOopInfo for obj: records its referrer and whether any of
// its fields point to other oops (determined by iterating obj's oop fields).
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(referrer, points_to_oops_checker.result());
}
1670 
1671 void HeapShared::init_box_classes(TRAPS) {
1672   if (ArchiveHeapLoader::is_in_use()) {
1673     vmClasses::Boolean_klass()->initialize(CHECK);
1674     vmClasses::Character_klass()->initialize(CHECK);
1675     vmClasses::Float_klass()->initialize(CHECK);
1676     vmClasses::Double_klass()->initialize(CHECK);
1677     vmClasses::Byte_klass()->initialize(CHECK);
1678     vmClasses::Short_klass()->initialize(CHECK);
1679     vmClasses::Integer_klass()->initialize(CHECK);
1680     vmClasses::Long_klass()->initialize(CHECK);
1681     vmClasses::Void_klass()->initialize(CHECK);
1682   }
1683 }
1684 
1685 void HeapShared::exit_on_error() {
1686   if (_context != nullptr) {
1687     ResourceMark rm;
1688     LogStream ls(Log(cds, heap)::error());
1689     ls.print_cr("Context");
1690     for (int i = 0; i < _context->length(); i++) {
1691       const char* s = _context->at(i);
1692       ls.print_cr("- %s", s);
1693     }
1694   }
1695   debug_trace();
1696   MetaspaceShared::unrecoverable_writing_error();
1697 }
1698 
// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all objects that are reachable from orig_obj (including those that
//     were already archived when this function is called)
// Returns false only when a level-1 (subgraph root) object cannot be archived.
bool HeapShared::archive_reachable_objects_from(int level,
                                                KlassSubGraphInfo* subgraph_info,
                                                oop orig_obj) {
  assert(orig_obj != nullptr, "must be");
  // Iterative traversal: walk_one_object() pushes the oops referenced by each
  // visited object onto the stack.
  PendingOopStack stack;
  stack.push(PendingOop(orig_obj, nullptr, level));

  while (stack.length() > 0) {
    PendingOop po = stack.pop();
    _object_being_archived = po;
    bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
    _object_being_archived = PendingOop();

    if (!status) {
      // Don't archive a subgraph root that's too big. For archived static fields, that's OK
      // as the Java code will take care of initializing this field dynamically.
      assert(level == 1, "VM should have exited with unarchivable objects for _level > 1");
      return false;
    }
  }

  return true;
}
1727 
1728 bool HeapShared::walk_one_object(PendingOopStack* stack, int level, KlassSubGraphInfo* subgraph_info,
1729                                  oop orig_obj, oop referrer) {
1730   assert(orig_obj != nullptr, "must be");
1731   if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
1732     // This object has injected fields that cannot be supported easily, so we disallow them for now.
1733     // If you get an error here, you probably made a change in the JDK library that has added
1734     // these objects that are referenced (directly or indirectly) by static fields.
1735     ResourceMark rm;
1736     log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
1737     exit_on_error();

1738   }
1739 
1740   if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
1741     ResourceMark rm;
1742     LogTarget(Debug, cds, heap) log;
1743     LogStream out(log);
1744     out.print("Found java mirror " PTR_FORMAT " ", p2i(orig_obj));
1745     Klass* k = java_lang_Class::as_Klass(orig_obj);
1746     if (k != nullptr) {
1747       out.print("%s", k->external_name());
1748     } else {
1749       out.print("primitive");
1750     }
1751     out.print_cr("; scratch mirror = "  PTR_FORMAT,
1752                  p2i(scratch_java_mirror(orig_obj)));
1753   }
1754 
1755   if (CDSConfig::is_initing_classes_at_dump_time()) {
1756     if (java_lang_Class::is_instance(orig_obj)) {
1757       orig_obj = scratch_java_mirror(orig_obj);

1793 
1794   bool already_archived = has_been_archived(orig_obj);
1795   bool record_klasses_only = already_archived;
1796   if (!already_archived) {
1797     ++_num_new_archived_objs;
1798     if (!archive_object(orig_obj, referrer, subgraph_info)) {
1799       // Skip archiving the sub-graph referenced from the current entry field.
1800       ResourceMark rm;
1801       log_error(cds, heap)(
1802         "Cannot archive the sub-graph referenced from %s object ("
1803         PTR_FORMAT ") size %zu, skipped.",
1804         orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1805       if (level == 1) {
1806         // Don't archive a subgraph root that's too big. For archives static fields, that's OK
1807         // as the Java code will take care of initializing this field dynamically.
1808         return false;
1809       } else {
1810         // We don't know how to handle an object that has been archived, but some of its reachable
1811         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1812         // we have a real use case.
1813         exit_on_error();
1814       }
1815     }
1816   }
1817 
1818   Klass *orig_k = orig_obj->klass();
1819   subgraph_info->add_subgraph_object_klass(orig_k);
1820 
1821   {
1822     // Find all the oops that are referenced by orig_obj, push them onto the stack
1823     // so we can work on them next.
1824     ResourceMark rm;
1825     OopFieldPusher pusher(stack, level, record_klasses_only, subgraph_info, orig_obj);
1826     orig_obj->oop_iterate(&pusher);
1827   }
1828 
1829   if (CDSConfig::is_initing_classes_at_dump_time()) {
1830     // The enum klasses are archived with aot-initialized mirror.
1831     // See AOTClassInitializer::can_archive_initialized_mirror().
1832   } else {
1833     if (CDSEnumKlass::is_enum_obj(orig_obj)) {

2247 
// Archives the subgraphs reachable from every entry field in fields[],
// then logs (and, in non-product builds, verifies) the results.
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the class of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    ContextMark cm(klass_name);
    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    // Note: the outer loop does NOT advance i; the inner loop consumes the
    // whole run of entries for this class.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      // Pointer comparison: presumably consecutive entries for the same class
      // share the same string literal — TODO confirm.
      if (f->klass_name != klass_name) {
        break;
      }

      // This inner ContextMark intentionally shadows the outer one, pushing
      // the field name onto the context stack for error reporting.
      ContextMark cm(f->field_name);
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records = %d",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  // Re-walk each entry field's subgraph to verify the recorded references.
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}
< prev index next >