< prev index next >

src/hotspot/share/cds/heapShared.cpp

Print this page

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "cds/aotClassInitializer.hpp"
  27 #include "cds/archiveBuilder.hpp"
  28 #include "cds/archiveHeapLoader.hpp"
  29 #include "cds/archiveHeapWriter.hpp"
  30 #include "cds/archiveUtils.hpp"

  31 #include "cds/cdsConfig.hpp"
  32 #include "cds/cdsEnumKlass.hpp"
  33 #include "cds/cdsHeapVerifier.hpp"
  34 #include "cds/heapShared.hpp"
  35 #include "cds/metaspaceShared.hpp"
  36 #include "classfile/classLoaderData.hpp"
  37 #include "classfile/classLoaderExt.hpp"
  38 #include "classfile/javaClasses.inline.hpp"
  39 #include "classfile/modules.hpp"
  40 #include "classfile/stringTable.hpp"
  41 #include "classfile/symbolTable.hpp"
  42 #include "classfile/systemDictionary.hpp"
  43 #include "classfile/systemDictionaryShared.hpp"
  44 #include "classfile/vmClasses.hpp"
  45 #include "classfile/vmSymbols.hpp"
  46 #include "gc/shared/collectedHeap.hpp"
  47 #include "gc/shared/gcLocker.hpp"
  48 #include "gc/shared/gcVMOperations.hpp"
  49 #include "logging/log.hpp"
  50 #include "logging/logStream.hpp"

  69 #include "gc/g1/g1CollectedHeap.hpp"
  70 #endif
  71 
  72 #if INCLUDE_CDS_JAVA_HEAP
  73 
  74 struct ArchivableStaticFieldInfo {
  75   const char* klass_name;
  76   const char* field_name;
  77   InstanceKlass* klass;
  78   int offset;
  79   BasicType type;
  80 
  81   ArchivableStaticFieldInfo(const char* k, const char* f)
  82   : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}
  83 
  84   bool valid() {
  85     return klass_name != nullptr;
  86   }
  87 };
  88 





















  89 bool HeapShared::_disable_writing = false;
  90 DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
  91 
  92 size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
  93 size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
  94 size_t HeapShared::_total_obj_count;
  95 size_t HeapShared::_total_obj_size;
  96 
  97 #ifndef PRODUCT
  98 #define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
  99 static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
 100 static const char* _test_class_name = nullptr;
 101 static Klass* _test_class = nullptr;
 102 static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
 103 #endif
 104 
 105 
 106 //
 107 // If you add new entries to the following tables, you should know what you're doing!
 108 //
 109 
 110 static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
 111   {"java/lang/Integer$IntegerCache",              "archivedCache"},
 112   {"java/lang/Long$LongCache",                    "archivedCache"},
 113   {"java/lang/Byte$ByteCache",                    "archivedCache"},
 114   {"java/lang/Short$ShortCache",                  "archivedCache"},
 115   {"java/lang/Character$CharacterCache",          "archivedCache"},
 116   {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
 117   {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
 118   {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
 119   {"java/util/ImmutableCollections",              "archivedObjects"},
 120   {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
 121   {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
 122   {"jdk/internal/math/FDBigInteger",              "archivedCaches"},

 123 
 124 #ifndef PRODUCT
 125   {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
 126 #endif
 127   {nullptr, nullptr},
 128 };
 129 
 130 // full module graph
 131 static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
 132   {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
 133   {ARCHIVED_BOOT_LAYER_CLASS,                     ARCHIVED_BOOT_LAYER_FIELD},
 134   {"java/lang/Module$ArchivedData",               "archivedData"},
 135   {nullptr, nullptr},
 136 };
 137 
 138 KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
 139 ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
 140 GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;


 141 GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments;
 142 int HeapShared::_root_segment_max_size_elems;
 143 OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
 144 MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
 145 MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;
 146 
 147 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
 148   for (int i = 0; fields[i].valid(); i++) {
 149     if (fields[i].klass == ik) {
 150       return true;
 151     }
 152   }
 153   return false;
 154 }
 155 
 156 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
 157   return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
 158          is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
 159 }
 160 

 208                          vmSymbols::void_BuiltinClassLoader_signature(),
 209                          CHECK);
 210   Handle boot_loader(THREAD, result.get_oop());
 211   reset_states(boot_loader(), CHECK);
 212 }
 213 
 214 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
 215 
 216 bool HeapShared::has_been_archived(oop obj) {
 217   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 218   return archived_object_cache()->get(obj) != nullptr;
 219 }
 220 
 221 int HeapShared::append_root(oop obj) {
 222   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 223 
 224   // No GC should happen since we aren't scanning _pending_roots.
 225   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 226 
 227   if (_pending_roots == nullptr) {
 228     _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
 229   }
 230 
 231   return _pending_roots->append(obj);

 232 }
 233 
 234 objArrayOop HeapShared::root_segment(int segment_idx) {
 235   if (CDSConfig::is_dumping_heap()) {
 236     assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 237     if (!HeapShared::can_write()) {
 238       return nullptr;
 239     }
 240   } else {
 241     assert(CDSConfig::is_using_archive(), "must be");
 242   }
 243 
 244   objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
 245   assert(segment != nullptr, "should have been initialized");
 246   return segment;
 247 }
 248 
































































































































































 249 void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
 250   assert(_root_segment_max_size_elems > 0, "sanity");
 251 
 252   // Try to avoid divisions for the common case.
 253   if (idx < _root_segment_max_size_elems) {
 254     seg_idx = 0;
 255     int_idx = idx;
 256   } else {
 257     seg_idx = idx / _root_segment_max_size_elems;
 258     int_idx = idx % _root_segment_max_size_elems;
 259   }
 260 
 261   assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
 262          "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
 263 }
 264 
 265 // Returns an objArray that contains all the roots of the archived objects
 266 oop HeapShared::get_root(int index, bool clear) {
 267   assert(index >= 0, "sanity");
 268   assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");

 350       return nullptr;
 351     }
 352   }
 353   void set_oop(MetaspaceObj* ptr, oop o) {
 354     MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
 355     OopHandle handle(Universe::vm_global(), o);
 356     bool is_new = put(ptr, handle);
 357     assert(is_new, "cannot set twice");
 358   }
 359   void remove_oop(MetaspaceObj* ptr) {
 360     MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
 361     OopHandle* handle = get(ptr);
 362     if (handle != nullptr) {
 363       handle->release(Universe::vm_global());
 364       remove(ptr);
 365     }
 366   }
 367 };
 368 
// Records 'dest' as the scratch resolved-references array to be used for
// constant pool 'src' when the heap is archived.
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  _scratch_references_table->set_oop(src, dest);
}
 372 
// Returns the scratch resolved-references array previously recorded for
// constant pool 'src' via add_scratch_resolved_references().
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_references_table->get_oop(src);
}
 376 
 377 void HeapShared::init_scratch_objects(TRAPS) {
 378   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 379     BasicType bt = (BasicType)i;
 380     if (!is_reference_type(bt)) {
 381       oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
 382       _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);

 383     }
 384   }
 385   _scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
 386   _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();


 387 }
 388 
 389 // Given java_mirror that represents a (primitive or reference) type T,
 390 // return the "scratch" version that represents the same type T.
 391 // Note that if java_mirror will be returned if it's already a
 392 // scratch mirror.
 393 //
 394 // See java_lang_Class::create_scratch_mirror() for more info.
 395 oop HeapShared::scratch_java_mirror(oop java_mirror) {
 396   assert(java_lang_Class::is_instance(java_mirror), "must be");
 397 
 398   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 399     BasicType bt = (BasicType)i;
 400     if (!is_reference_type(bt)) {
 401       if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
 402         return java_mirror;
 403       }
 404     }
 405   }
 406 
 407   if (java_lang_Class::is_primitive(java_mirror)) {
 408     return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
 409   } else {
 410     return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
 411   }
 412 }
 413 
 414 oop HeapShared::scratch_java_mirror(BasicType t) {
 415   assert((uint)t < T_VOID+1, "range check");
 416   assert(!is_reference_type(t), "sanity");
 417   return _scratch_basic_type_mirrors[t].resolve();
 418 }
 419 
// Returns the scratch mirror recorded for klass k, via set_scratch_java_mirror().
oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_java_mirror_table->get_oop(k);
}
 423 
// Records 'mirror' as the scratch mirror for klass k. A mirror can only be
// set once per klass (enforced by MetaspaceObjToOopHandleTable::set_oop).
void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  _scratch_java_mirror_table->set_oop(k, mirror);
}
 427 
 428 void HeapShared::remove_scratch_objects(Klass* k) {
 429   // Klass is being deallocated. Java mirror can still be alive, and it should not
 430   // point to dead klass. We need to break the link from mirror to the Klass.
 431   // See how InstanceKlass::deallocate_contents does it for normal mirrors.
 432   oop mirror = _scratch_java_mirror_table->get_oop(k);
 433   if (mirror != nullptr) {
 434     java_lang_Class::set_klass(mirror, nullptr);
 435   }
 436   _scratch_java_mirror_table->remove_oop(k);
 437   if (k->is_instance_klass()) {
 438     _scratch_references_table->remove(InstanceKlass::cast(k)->constants());
 439   }









 440 }
 441 
 442 //TODO: we eventually want a more direct test for these kinds of things.
 443 //For example the JVM could record some bit of context from the creation
 444 //of the klass, such as who called the hidden class factory.  Using
 445 //string compares on names is fragile and will break as soon as somebody
 446 //changes the names in the JDK code.  See discussion in JDK-8342481 for
 447 //related ideas about marking AOT-related classes.
 448 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
 449   return ik->is_hidden() &&
 450     (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
 451      ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
 452      ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
 453      ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
 454 }
 455 
 456 bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
 457   return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
 458 }
 459 

 754         mark_required_if_hidden_class(java_lang_Class::as_Klass(o));
 755       } else if (java_lang_invoke_ResolvedMethodName::is_instance(o)) {
 756         Method* m = java_lang_invoke_ResolvedMethodName::vmtarget(o);
 757         if (m != nullptr) {
 758           mark_required_if_hidden_class(m->method_holder());
 759         }
 760       }
 761 
 762       o->oop_iterate(&c);
 763     }
 764   }
 765 }
 766 
// Top-level driver for heap archiving at dump time: records the special
// subgraph, copies all archivable objects, verifies them, and finally hands
// the pending roots to ArchiveHeapWriter, which fills in *heap_info.
void HeapShared::archive_objects(ArchiveHeapInfo *heap_info) {
  {
    // No safepoint may occur while raw oops are being collected below.
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (UseCompressedOops || UseG1GC) {
      // Log the heap range used to encode archived oops (compressed-oops
      // range, or the G1 reserved range when oops are uncompressed).
      log_info(cds)("Heap range = [" PTR_FORMAT " - "  PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }
    copy_objects();

    CDSHeapVerifier::verify();
    check_special_subgraph_classes();
  }

  ArchiveHeapWriter::write(_pending_roots, heap_info);
}
 793 
 794 void HeapShared::copy_interned_strings() {
 795   init_seen_objects_table();
 796 
 797   auto copier = [&] (oop s, bool value_ignored) {
 798     assert(s != nullptr, "sanity");
 799     assert(!ArchiveHeapWriter::is_string_too_large_to_archive(s), "large strings must have been filtered");
 800     bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, s);
 801     assert(success, "must be");
 802     // Prevent string deduplication from changing the value field to
 803     // something not in the archive.
 804     java_lang_String::set_deduplication_forbidden(s);
 805   };
 806   _dumped_interned_strings->iterate_all(copier);
 807 
 808   delete_seen_objects_table();
 809 }
 810 
 811 void HeapShared::copy_special_subgraph() {

 907     // ArchiveHeapTestClass is used for a very small number of internal regression
 908     // tests (non-product builds only). It may initialize some unexpected classes.
 909     if (ArchiveHeapTestClass == nullptr)
 910 #endif
 911     {
 912       if (!src_ik->in_javabase_module()) {
 913         // Class/interface types in the boot loader may have been initialized as side effects
 914         // of JVM bootstrap code, so they are fine. But we need to check all other classes.
 915         if (buffered_ik->is_interface()) {
 916           // This probably means a bug in AOTConstantPoolResolver.::is_indy_resolution_deterministic()
 917           guarantee(!buffered_ik->interface_needs_clinit_execution_as_super(),
 918                     "should not have initialized an interface whose <clinit> might have unpredictable side effects");
 919         } else {
 920           // "normal" classes
 921           guarantee(HeapShared::is_archivable_hidden_klass(buffered_ik),
 922                     "should not have initialized any non-interface, non-hidden classes outside of java.base");
 923         }
 924       }
 925     }
 926 





 927     buffered_ik->set_has_aot_initialized_mirror();
 928     if (AOTClassInitializer::is_runtime_setup_required(src_ik)) {
 929       buffered_ik->set_is_runtime_setup_required();
 930     }
 931     made_progress = true;
 932 
 933     InstanceKlass* super = buffered_ik->java_super();
 934     if (super != nullptr) {
 935       mark_for_aot_initialization(super);
 936     }
 937 
 938     Array<InstanceKlass*>* interfaces = buffered_ik->transitive_interfaces();
 939     for (int i = 0; i < interfaces->length(); i++) {
 940       InstanceKlass* intf = interfaces->at(i);
 941       mark_for_aot_initialization(intf);
 942       if (!intf->is_initialized()) {
 943         assert(!intf->interface_needs_clinit_execution_as_super(/*also_check_supers*/false), "sanity");
 944         assert(!intf->has_aot_initialized_mirror(), "must not be marked");
 945       }
 946     }

1132       return;
1133     }
1134   } else {
1135     assert(buffered_k->is_typeArray_klass(), "must be");
1136     // Primitive type arrays are created early during Universe::genesis.
1137     return;
1138   }
1139 
1140   if (log_is_enabled(Debug, cds, heap)) {
1141     if (!_subgraph_object_klasses->contains(buffered_k)) {
1142       ResourceMark rm;
1143       log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
1144     }
1145   }
1146 
1147   _subgraph_object_klasses->append_if_missing(buffered_k);
1148   _has_non_early_klasses |= is_non_early_klass(orig_k);
1149 }
1150 
1151 void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {




1152   if (ik->module()->name() == vmSymbols::java_base()) {
1153     assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
1154     return;
1155   }
1156 
1157   const char* lambda_msg = "";
1158   if (CDSConfig::is_dumping_invokedynamic()) {
1159     lambda_msg = ", or a lambda proxy class";
1160     if (HeapShared::is_lambda_proxy_klass(ik) &&
1161         (ik->class_loader() == nullptr ||
1162          ik->class_loader() == SystemDictionary::java_platform_loader() ||
1163          ik->class_loader() == SystemDictionary::java_system_loader())) {
1164       return;
1165     }
1166   }
1167 
1168 #ifndef PRODUCT
1169   if (!ik->module()->is_named() && ik->package() == nullptr && ArchiveHeapTestClass != nullptr) {
1170     // This class is loaded by ArchiveHeapTestClass
1171     return;

1369                           which, k->external_name());
1370       FlagSetting fs1(VerifyBeforeGC, true);
1371       FlagSetting fs2(VerifyDuringGC, true);
1372       FlagSetting fs3(VerifyAfterGC,  true);
1373       Universe::heap()->collect(GCCause::_java_lang_system_gc);
1374     }
1375   }
1376 }
1377 
// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }
  // Resolve both the regular and the full-module-graph subgraph tables.
  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
1392 
1393 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1394   for (int i = 0; fields[i].valid(); i++) {
1395     ArchivableStaticFieldInfo* info = &fields[i];
1396     TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1397     InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1398     assert(k != nullptr && k->is_shared_boot_class(), "sanity");
1399     resolve_classes_for_subgraph_of(current, k);
1400   }
1401 }
1402 
1403 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1404   JavaThread* THREAD = current;
1405   ExceptionMark em(THREAD);
1406   const ArchivedKlassSubGraphInfoRecord* record =
1407    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1408   if (HAS_PENDING_EXCEPTION) {

1737   oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
1738   PointsToOopsChecker points_to_oops_checker;
1739   obj->oop_iterate(&points_to_oops_checker);
1740   return CachedOopInfo(referrer, points_to_oops_checker.result());
1741 }
1742 
// Eagerly initializes all primitive box classes when an archived heap is in
// use. NOTE(review): presumably needed so archived box instances (e.g. the
// autoboxing caches) can be referenced before the classes are touched by
// application code — confirm against callers.
void HeapShared::init_box_classes(TRAPS) {
  if (ArchiveHeapLoader::is_in_use()) {
    vmClasses::Boolean_klass()->initialize(CHECK);
    vmClasses::Character_klass()->initialize(CHECK);
    vmClasses::Float_klass()->initialize(CHECK);
    vmClasses::Double_klass()->initialize(CHECK);
    vmClasses::Byte_klass()->initialize(CHECK);
    vmClasses::Short_klass()->initialize(CHECK);
    vmClasses::Integer_klass()->initialize(CHECK);
    vmClasses::Long_klass()->initialize(CHECK);
    vmClasses::Void_klass()->initialize(CHECK);
  }
}
1756 
























1757 // (1) If orig_obj has not been archived yet, archive it.
1758 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
1759 //     trace all  objects that are reachable from it, and make sure these objects are archived.
1760 // (3) Record the klasses of all orig_obj and all reachable objects.
1761 bool HeapShared::archive_reachable_objects_from(int level,
1762                                                 KlassSubGraphInfo* subgraph_info,
1763                                                 oop orig_obj) {

1764   assert(orig_obj != nullptr, "must be");
1765 
1766   if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
1767     // This object has injected fields that cannot be supported easily, so we disallow them for now.
1768     // If you get an error here, you probably made a change in the JDK library that has added
1769     // these objects that are referenced (directly or indirectly) by static fields.
1770     ResourceMark rm;
1771     log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
1772     debug_trace();
1773     MetaspaceShared::unrecoverable_writing_error();
1774   }
1775 
1776   if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
1777     ResourceMark rm;
1778     LogTarget(Debug, cds, heap) log;
1779     LogStream out(log);
1780     out.print("Found java mirror " PTR_FORMAT " ", p2i(orig_obj));
1781     Klass* k = java_lang_Class::as_Klass(orig_obj);
1782     if (k != nullptr) {
1783       out.print("%s", k->external_name());
1784     } else {
1785       out.print("primitive");
1786     }
1787     out.print_cr("; scratch mirror = "  PTR_FORMAT,
1788                  p2i(scratch_java_mirror(orig_obj)));
1789   }
1790 
1791   if (CDSConfig::is_initing_classes_at_dump_time()) {
1792     if (java_lang_Class::is_instance(orig_obj)) {
1793       orig_obj = scratch_java_mirror(orig_obj);

1829 
1830   bool already_archived = has_been_archived(orig_obj);
1831   bool record_klasses_only = already_archived;
1832   if (!already_archived) {
1833     ++_num_new_archived_objs;
1834     if (!archive_object(orig_obj)) {
1835       // Skip archiving the sub-graph referenced from the current entry field.
1836       ResourceMark rm;
1837       log_error(cds, heap)(
1838         "Cannot archive the sub-graph referenced from %s object ("
1839         PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
1840         orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1841       if (level == 1) {
1842         // Don't archive a subgraph root that's too big. For archives static fields, that's OK
1843         // as the Java code will take care of initializing this field dynamically.
1844         return false;
1845       } else {
1846         // We don't know how to handle an object that has been archived, but some of its reachable
1847         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1848         // we have a real use case.
1849         MetaspaceShared::unrecoverable_writing_error();
1850       }
1851     }
1852   }
1853 
1854   Klass *orig_k = orig_obj->klass();
1855   subgraph_info->add_subgraph_object_klass(orig_k);
1856 
1857   WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj);
1858   orig_obj->oop_iterate(&walker);
1859 
1860   if (CDSConfig::is_initing_classes_at_dump_time()) {
1861     // The enum klasses are archived with aot-initialized mirror.
1862     // See AOTClassInitializer::can_archive_initialized_mirror().
1863   } else {
1864     if (CDSEnumKlass::is_enum_obj(orig_obj)) {
1865       CDSEnumKlass::handle_enum_obj(level + 1, subgraph_info, orig_obj);
1866     }
1867   }
1868 
1869   return true;

2278 
// Dumps all subgraphs rooted at the static fields listed in the
// null-terminated table fields[], recording per-subgraph statistics, and
// (in non-product builds) verifies each dumped subgraph afterwards.
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the class of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  // NOTE: the outer loop deliberately has no increment clause; 'i' is
  // advanced by the inner loop, which consumes one run of entries.
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records = %d",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  // Re-walk each subgraph to verify that everything reachable was archived.
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "cds/aotClassInitializer.hpp"
  27 #include "cds/archiveBuilder.hpp"
  28 #include "cds/archiveHeapLoader.hpp"
  29 #include "cds/archiveHeapWriter.hpp"
  30 #include "cds/archiveUtils.hpp"
  31 #include "cds/cdsAccess.hpp"
  32 #include "cds/cdsConfig.hpp"
  33 #include "cds/cdsEnumKlass.hpp"
  34 #include "cds/cdsHeapVerifier.hpp"
  35 #include "cds/heapShared.hpp"
  36 #include "cds/metaspaceShared.hpp"
  37 #include "classfile/classLoaderData.hpp"
  38 #include "classfile/classLoaderExt.hpp"
  39 #include "classfile/javaClasses.inline.hpp"
  40 #include "classfile/modules.hpp"
  41 #include "classfile/stringTable.hpp"
  42 #include "classfile/symbolTable.hpp"
  43 #include "classfile/systemDictionary.hpp"
  44 #include "classfile/systemDictionaryShared.hpp"
  45 #include "classfile/vmClasses.hpp"
  46 #include "classfile/vmSymbols.hpp"
  47 #include "gc/shared/collectedHeap.hpp"
  48 #include "gc/shared/gcLocker.hpp"
  49 #include "gc/shared/gcVMOperations.hpp"
  50 #include "logging/log.hpp"
  51 #include "logging/logStream.hpp"

  70 #include "gc/g1/g1CollectedHeap.hpp"
  71 #endif
  72 
  73 #if INCLUDE_CDS_JAVA_HEAP
  74 
// Describes one static field (declared by a boot class) that serves as the
// entry point of an archivable object subgraph. Only the two names are
// supplied in the tables below; the remaining members are filled in later.
struct ArchivableStaticFieldInfo {
  const char* klass_name;   // name of the class declaring the field
  const char* field_name;   // name of the static field
  InstanceKlass* klass;     // resolved later; nullptr until then
  int offset;               // field offset; resolved later
  BasicType type;           // field type; T_ILLEGAL until resolved

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  // A {nullptr, nullptr} entry marks the end of a table.
  bool valid() {
    return klass_name != nullptr;
  }
};
  89 
// RAII helper: pushes obj onto HeapShared::_trace on entry and pops it on
// exit. NOTE(review): the uses of _trace are outside this view — presumably
// it records the chain of objects currently being archived, for diagnostics.
class HeapShared::ArchivingObjectMark : public StackObj {
public:
  ArchivingObjectMark(oop obj) {
    _trace->push(obj);
  }
  ~ArchivingObjectMark() {
    _trace->pop();
  }
};
  99 
// RAII helper: pushes a context string onto HeapShared::_context on entry
// and pops it on exit. NOTE(review): the embedded ResourceMark's exact
// purpose isn't visible from this chunk — presumably it scopes
// resource-area allocations made while this context is active; confirm at
// the call sites.
class HeapShared::ContextMark : public StackObj {
  ResourceMark rm;
public:
  ContextMark(const char* c) : rm{} {
    _context->push(c);
  }
  ~ContextMark() {
    _context->pop();
  }
};
 110 
 111 bool HeapShared::_disable_writing = false;
 112 DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
 113 
 114 size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
 115 size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
 116 size_t HeapShared::_total_obj_count;
 117 size_t HeapShared::_total_obj_size;
 118 
 119 #ifndef PRODUCT
 120 #define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
 121 static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
 122 static const char* _test_class_name = nullptr;
 123 static Klass* _test_class = nullptr;
 124 static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
 125 #endif
 126 
 127 
//
// If you add new entries to the following tables, you should know what you're doing!
//

// Static fields whose referenced object subgraphs are archived in the CDS heap.
// Each table is terminated by a {nullptr, nullptr} sentinel, detected by
// ArchivableStaticFieldInfo::valid().
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",              "archivedCache"},
  {"java/lang/Long$LongCache",                    "archivedCache"},
  {"java/lang/Byte$ByteCache",                    "archivedCache"},
  {"java/lang/Short$ShortCache",                  "archivedCache"},
  {"java/lang/Character$CharacterCache",          "archivedCache"},
  {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
  {"java/util/ImmutableCollections",              "archivedObjects"},
  {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
  {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
  {"java/lang/reflect/Proxy$ProxyBuilder",        "archivedData"},    // FIXME -- requires AOTClassLinking

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};
 152 
// Additional entry fields that are archived only together with the
// full module graph. Terminated by the same {nullptr, nullptr} sentinel.
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS,                     ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData",               "archivedData"},
  {nullptr, nullptr},
};
 160 
// Subgraph info for archived objects that don't belong to any class subgraph
// (see archive_objects(); it is keyed on Object_klass for convenience).
KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
// Roots collected by append_root() at dump time; consumed in archive_objects().
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
// Error-reporting stacks maintained by ArchivingObjectMark / ContextMark.
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_trace = nullptr;
GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
// Runtime segments of the archived-roots array; indexed via get_segment_indexes().
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;
 171 
 172 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
 173   for (int i = 0; fields[i].valid(); i++) {
 174     if (fields[i].klass == ik) {
 175       return true;
 176     }
 177   }
 178   return false;
 179 }
 180 
 181 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
 182   return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
 183          is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
 184 }
 185 

 233                          vmSymbols::void_BuiltinClassLoader_signature(),
 234                          CHECK);
 235   Handle boot_loader(THREAD, result.get_oop());
 236   reset_states(boot_loader(), CHECK);
 237 }
 238 
// Dump-time cache of objects that have already been archived (see has_been_archived()).
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
 240 
 241 bool HeapShared::has_been_archived(oop obj) {
 242   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 243   return archived_object_cache()->get(obj) != nullptr;
 244 }
 245 
 246 int HeapShared::append_root(oop obj) {
 247   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 248 
 249   // No GC should happen since we aren't scanning _pending_roots.
 250   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
 251 
 252   if (_pending_roots == nullptr) {
 253     _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
 254   }
 255 
 256   OopHandle oh(Universe::vm_global(), obj);
 257   return _pending_roots->append(oh);
 258 }
 259 
// Returns the segment_idx-th segment of the archived-roots array, or nullptr
// at dump time when the heap cannot be written.
objArrayOop HeapShared::root_segment(int segment_idx) {
  if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
    // Dump time: only the VM thread may touch the segments.
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!HeapShared::can_write()) {
      return nullptr;
    }
  } else {
    // Otherwise we must be running with an existing archive.
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}
 274 
 275 inline unsigned int oop_handle_hash(const OopHandle& oh) {
 276   oop o = oh.resolve();
 277   if (o == nullptr) {
 278     return 0;
 279   } else {
 280     return o->identity_hash();
 281   }
 282 }
 283 
 284 inline bool oop_handle_equals(const OopHandle& a, const OopHandle& b) {
 285   return a.resolve() == b.resolve();
 286 }
 287 
// Maps an original heap object to its "scratch" copy used during dumping.
// Keys and values are strong OopHandles; hashing/equality resolve the handle
// (see oop_handle_hash / oop_handle_equals above).
class OrigToScratchObjectTable: public ResourceHashtable<OopHandle, OopHandle,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

// Created lazily by track_scratch_object(); guarded by ArchivedObjectTables_lock.
static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;
 296 
 297 void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
 298   MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
 299   if (_orig_to_scratch_object_table == nullptr) {
 300     _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
 301   }
 302 
 303   OopHandle orig_h(Universe::vm_global(), orig_obj);
 304   OopHandle scratch_h(Universe::vm_global(), scratch_obj);
 305   _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
 306 }
 307 
 308 oop HeapShared::orig_to_scratch_object(oop orig_obj) {
 309   MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
 310   if (_orig_to_scratch_object_table != nullptr) {
 311     OopHandle orig(&orig_obj);
 312     OopHandle* v = _orig_to_scratch_object_table->get(orig);
 313     if (v != nullptr) {
 314       return v->resolve();
 315     }
 316   }
 317   return nullptr;
 318 }
 319 
 320 // Permanent oops are used to support AOT-compiled methods, which may have in-line references
 321 // to Strings and MH oops.
 322 //
 323 // At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
// and are accessed via CDSAccess::get_archived_object(int).
// Dump-time bookkeeping for an archived oop that may be assigned a permanent index.
struct PermanentOopInfo {
  int _index;       // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object; -1 otherwise
  int _heap_offset; // Offset of the object from the bottom of the archived heap.
  PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
};
 330 
// Maps archived heap objects (as OopHandles) to their PermanentOopInfo.
class PermanentOopTable: public ResourceHashtable<OopHandle, PermanentOopInfo,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

// Number of permanent indices assigned so far (dump time).
static int _dumptime_permanent_oop_count = 0;
static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
// Runtime-side list: element i holds the oop whose permanent index is i.
static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;
 341 
 342 // ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
 343 // so we can remember their offset (from the bottom of the archived heap).
 344 void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
 345   assert_at_safepoint();
 346   if (_dumptime_permanent_oop_table == nullptr) {
 347     _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
 348   }
 349 
 350   PermanentOopInfo info(-1, offset);
 351   OopHandle oh(Universe::vm_global(), obj);
 352   _dumptime_permanent_oop_table->put_when_absent(oh, info);
 353 }
 354 
// A permanent index is assigned to an archived object ONLY when
// the AOT compiler calls this function.
// Returns -1 if the object cannot be assigned a permanent index.
int HeapShared::get_archived_object_permanent_index(oop obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);

  if (!CDSConfig::is_dumping_heap()) {
    return -1; // Called by the Leyden old workflow
  }
  if (_dumptime_permanent_oop_table == nullptr) {
    return -1;
  }

  // If obj has a scratch copy, the scratch copy is what is actually archived,
  // so do the lookup with that instead.
  if (_orig_to_scratch_object_table != nullptr) {
    OopHandle orig(&obj);
    OopHandle* v = _orig_to_scratch_object_table->get(orig);
    if (v != nullptr) {
      obj = v->resolve();
    }
  }

  // Transient handle pointing at the local; used only as a lookup key.
  OopHandle tmp(&obj);
  PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
  if (info == nullptr) {
    return -1;
  } else {
    // Lazily assign the next available index on first request.
    if (info->_index < 0) {
      info->_index = _dumptime_permanent_oop_count++;
    }
    return info->_index;
  }
}
 386 
 387 oop HeapShared::get_archived_object(int permanent_index) {
 388   assert(permanent_index >= 0, "sanity");
 389   assert(ArchiveHeapLoader::is_in_use(), "sanity");
 390   assert(_runtime_permanent_oops != nullptr, "sanity");
 391 
 392   return _runtime_permanent_oops->at(permanent_index).resolve();
 393 }
 394 
 395 // Remember all archived heap objects that have a permanent index.
 396 //   table[i] = offset of oop whose permanent index is i.
 397 void CachedCodeDirectoryInternal::dumptime_init_internal() {
 398   const int count = _dumptime_permanent_oop_count;
 399   int* table = (int*)CDSAccess::allocate_from_code_cache(count * sizeof(int));
 400   for (int i = 0; i < count; i++) {
 401     table[count] = -1;
 402   }
 403   _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
 404     int index = info._index;
 405     if (index >= 0) {
 406       assert(index < count, "sanity");
 407       table[index] = info._heap_offset;
 408     }
 409     return true; // continue
 410   });
 411 
 412   for (int i = 0; i < count; i++) {
 413     assert(table[i] >= 0, "must be");
 414   }
 415 
 416   log_info(cds)("Dumped %d permanent oops", count);
 417 
 418   _permanent_oop_count = count;
 419   CDSAccess::set_pointer(&_permanent_oop_offsets, table);
 420 }
 421 
 422 // This is called during the bootstrap of the production run, before any GC can happen.
 423 // Record each permanent oop in a OopHandle for GC safety.
 424 void CachedCodeDirectoryInternal::runtime_init_internal() {
 425   int count = _permanent_oop_count;
 426   int* table = _permanent_oop_offsets;
 427   _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
 428   for (int i = 0; i < count; i++) {
 429     oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
 430     OopHandle oh(Universe::vm_global(), obj);
 431     _runtime_permanent_oops->append(oh);
 432   }
 433 };
 434 
 435 void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
 436   assert(_root_segment_max_size_elems > 0, "sanity");
 437 
 438   // Try to avoid divisions for the common case.
 439   if (idx < _root_segment_max_size_elems) {
 440     seg_idx = 0;
 441     int_idx = idx;
 442   } else {
 443     seg_idx = idx / _root_segment_max_size_elems;
 444     int_idx = idx % _root_segment_max_size_elems;
 445   }
 446 
 447   assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
 448          "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
 449 }
 450 
 451 // Returns an objArray that contains all the roots of the archived objects
 452 oop HeapShared::get_root(int index, bool clear) {
 453   assert(index >= 0, "sanity");
 454   assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");

 536       return nullptr;
 537     }
 538   }
  // Associates oop 'o' with metaspace object 'ptr', keeping 'o' alive via a
  // global OopHandle. Asserts that 'ptr' had no previous association.
  void set_oop(MetaspaceObj* ptr, oop o) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle handle(Universe::vm_global(), o);
    bool is_new = put(ptr, handle);
    assert(is_new, "cannot set twice");
  }
  // Removes the association for 'ptr' (if any), releasing its global OopHandle
  // so the referent can be collected.
  void remove_oop(MetaspaceObj* ptr) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle* handle = get(ptr);
    if (handle != nullptr) {
      handle->release(Universe::vm_global());
      remove(ptr);
    }
  }
 553 };
 554 
// Records 'dest' as the scratch resolved_references array for constant pool 'src'.
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  // Lazy creation: presumably this can run before init_scratch_objects(),
  // which has a matching nullptr guard -- TODO confirm the intended ordering.
  if (_scratch_references_table == nullptr) {
    _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
  }
  _scratch_references_table->set_oop(src, dest);
}
 561 
// Returns the scratch resolved_references array recorded for 'src',
// or nullptr if none has been recorded.
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_references_table->get_oop(src);
}
 565 
// Creates the scratch mirrors for all primitive types and the tables that map
// metaspace objects to their scratch heap copies. May return with a pending
// exception (TRAPS) if mirror creation fails.
void HeapShared::init_scratch_objects(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
      track_scratch_object(Universe::java_mirror(bt), m);
    }
  }
  _scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
  // May already have been created by add_scratch_resolved_references().
  if (_scratch_references_table == nullptr) {
    _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
  }
}
 580 
 581 // Given java_mirror that represents a (primitive or reference) type T,
 582 // return the "scratch" version that represents the same type T.
// Note that java_mirror will be returned if it's already a
 584 // scratch mirror.
 585 //
 586 // See java_lang_Class::create_scratch_mirror() for more info.
 587 oop HeapShared::scratch_java_mirror(oop java_mirror) {
 588   assert(java_lang_Class::is_instance(java_mirror), "must be");
 589 
 590   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 591     BasicType bt = (BasicType)i;
 592     if (!is_reference_type(bt)) {
 593       if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
 594         return java_mirror;
 595       }
 596     }
 597   }
 598 
 599   if (java_lang_Class::is_primitive(java_mirror)) {
 600     return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
 601   } else {
 602     return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
 603   }
 604 }
 605 
 606 oop HeapShared::scratch_java_mirror(BasicType t) {
 607   assert((uint)t < T_VOID+1, "range check");
 608   assert(!is_reference_type(t), "sanity");
 609   return _scratch_basic_type_mirrors[t].resolve();
 610 }
 611 
// Returns the scratch mirror recorded for klass k, or nullptr if none was set.
oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_java_mirror_table->get_oop(k);
}
 615 
// Records 'mirror' as the scratch mirror for klass k, and also tracks the
// orig->scratch mapping so orig_to_scratch_object() works for mirrors.
void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  track_scratch_object(k->java_mirror(), mirror);
  _scratch_java_mirror_table->set_oop(k, mirror);
}
 620 
 621 void HeapShared::remove_scratch_objects(Klass* k) {
 622   // Klass is being deallocated. Java mirror can still be alive, and it should not
 623   // point to dead klass. We need to break the link from mirror to the Klass.
 624   // See how InstanceKlass::deallocate_contents does it for normal mirrors.
 625   oop mirror = _scratch_java_mirror_table->get_oop(k);
 626   if (mirror != nullptr) {
 627     java_lang_Class::set_klass(mirror, nullptr);
 628   }
 629   _scratch_java_mirror_table->remove_oop(k);
 630   if (k->is_instance_klass()) {
 631     _scratch_references_table->remove(InstanceKlass::cast(k)->constants());
 632   }
 633   if (mirror != nullptr) {
 634     OopHandle tmp(&mirror);
 635     OopHandle* v = _orig_to_scratch_object_table->get(tmp);
 636     if (v != nullptr) {
 637       oop scratch_mirror = v->resolve();
 638       java_lang_Class::set_klass(scratch_mirror, nullptr);
 639       _orig_to_scratch_object_table->remove(tmp);
 640     }
 641   }
 642 }
 643 
 644 //TODO: we eventually want a more direct test for these kinds of things.
 645 //For example the JVM could record some bit of context from the creation
 646 //of the klass, such as who called the hidden class factory.  Using
 647 //string compares on names is fragile and will break as soon as somebody
 648 //changes the names in the JDK code.  See discussion in JDK-8342481 for
 649 //related ideas about marking AOT-related classes.
 650 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
 651   return ik->is_hidden() &&
 652     (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
 653      ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
 654      ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
 655      ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
 656 }
 657 
// Returns true for hidden lambda proxy classes, recognized by the substring
// "$$Lambda+" in the class name (9 == strlen("$$Lambda+"); the match must not
// start at position 0, i.e. there is always an enclosing class name prefix).
bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
  return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
}
 661 

 956         mark_required_if_hidden_class(java_lang_Class::as_Klass(o));
 957       } else if (java_lang_invoke_ResolvedMethodName::is_instance(o)) {
 958         Method* m = java_lang_invoke_ResolvedMethodName::vmtarget(o);
 959         if (m != nullptr) {
 960           mark_required_if_hidden_class(m->method_holder());
 961         }
 962       }
 963 
 964       o->oop_iterate(&c);
 965     }
 966   }
 967 }
 968 
// Top-level entry point for copying all archivable objects into heap_info.
void HeapShared::archive_objects(ArchiveHeapInfo *heap_info) {
  {
    // No safepoint may occur while we walk and copy raw oops below.
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
    // Stacks used by ArchivingObjectMark / ContextMark for error reporting.
    _trace = new GrowableArrayCHeap<oop, mtClassShared>(250);
    _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (UseCompressedOops || UseG1GC) {
      log_info(cds)("Heap range = [" PTR_FORMAT " - "  PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }
    copy_objects();

    if (!SkipArchiveHeapVerification) {
      CDSHeapVerifier::verify();
    }
    check_special_subgraph_classes();
  }

  // Resolve the pending root OopHandles into plain oops for the writer.
  GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
  for (int i = 0; i < _pending_roots->length(); i++) {
    roots->append(_pending_roots->at(i).resolve());
  }
  ArchiveHeapWriter::write(roots, heap_info);
}
1003 
// Archives every string in _dumped_interned_strings as part of the special subgraph.
void HeapShared::copy_interned_strings() {
  init_seen_objects_table();

  auto copier = [&] (oop s, bool value_ignored) {
    assert(s != nullptr, "sanity");
    assert(!ArchiveHeapWriter::is_string_too_large_to_archive(s), "large strings must have been filtered");
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, s);
    assert(success, "must be");
    // Prevent string deduplication from changing the value field to
    // something not in the archive.
    java_lang_String::set_deduplication_forbidden(s);
  };
  _dumped_interned_strings->iterate_all(copier);

  delete_seen_objects_table();
}
1020 
1021 void HeapShared::copy_special_subgraph() {

1117     // ArchiveHeapTestClass is used for a very small number of internal regression
1118     // tests (non-product builds only). It may initialize some unexpected classes.
1119     if (ArchiveHeapTestClass == nullptr)
1120 #endif
1121     {
1122       if (!src_ik->in_javabase_module()) {
1123         // Class/interface types in the boot loader may have been initialized as side effects
1124         // of JVM bootstrap code, so they are fine. But we need to check all other classes.
1125         if (buffered_ik->is_interface()) {
          // This probably means a bug in AOTConstantPoolResolver::is_indy_resolution_deterministic()
1127           guarantee(!buffered_ik->interface_needs_clinit_execution_as_super(),
1128                     "should not have initialized an interface whose <clinit> might have unpredictable side effects");
1129         } else {
1130           // "normal" classes
1131           guarantee(HeapShared::is_archivable_hidden_klass(buffered_ik),
1132                     "should not have initialized any non-interface, non-hidden classes outside of java.base");
1133         }
1134       }
1135     }
1136 
1137 #if 0
1138     if (buffered_ik->name()->equals("jdk/internal/loader/NativeLibraries")) {  // FIXME -- leyden+JEP483 merge
1139       return false;
1140     }
1141 #endif
1142     buffered_ik->set_has_aot_initialized_mirror();
1143     if (AOTClassInitializer::is_runtime_setup_required(src_ik)) {
1144       buffered_ik->set_is_runtime_setup_required();
1145     }
1146     made_progress = true;
1147 
1148     InstanceKlass* super = buffered_ik->java_super();
1149     if (super != nullptr) {
1150       mark_for_aot_initialization(super);
1151     }
1152 
1153     Array<InstanceKlass*>* interfaces = buffered_ik->transitive_interfaces();
1154     for (int i = 0; i < interfaces->length(); i++) {
1155       InstanceKlass* intf = interfaces->at(i);
1156       mark_for_aot_initialization(intf);
1157       if (!intf->is_initialized()) {
1158         assert(!intf->interface_needs_clinit_execution_as_super(/*also_check_supers*/false), "sanity");
1159         assert(!intf->has_aot_initialized_mirror(), "must not be marked");
1160       }
1161     }

1347       return;
1348     }
1349   } else {
1350     assert(buffered_k->is_typeArray_klass(), "must be");
1351     // Primitive type arrays are created early during Universe::genesis.
1352     return;
1353   }
1354 
1355   if (log_is_enabled(Debug, cds, heap)) {
1356     if (!_subgraph_object_klasses->contains(buffered_k)) {
1357       ResourceMark rm;
1358       log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
1359     }
1360   }
1361 
1362   _subgraph_object_klasses->append_if_missing(buffered_k);
1363   _has_non_early_klasses |= is_non_early_klass(orig_k);
1364 }
1365 
1366 void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
1367   if (CDSConfig::is_dumping_invokedynamic()) {
1368     // FIXME -- this allows LambdaProxy classes
1369     return;
1370   }
1371   if (ik->module()->name() == vmSymbols::java_base()) {
1372     assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
1373     return;
1374   }
1375 
1376   const char* lambda_msg = "";
1377   if (CDSConfig::is_dumping_invokedynamic()) {
1378     lambda_msg = ", or a lambda proxy class";
1379     if (HeapShared::is_lambda_proxy_klass(ik) &&
1380         (ik->class_loader() == nullptr ||
1381          ik->class_loader() == SystemDictionary::java_platform_loader() ||
1382          ik->class_loader() == SystemDictionary::java_system_loader())) {
1383       return;
1384     }
1385   }
1386 
1387 #ifndef PRODUCT
1388   if (!ik->module()->is_named() && ik->package() == nullptr && ArchiveHeapTestClass != nullptr) {
1389     // This class is loaded by ArchiveHeapTestClass
1390     return;

1588                           which, k->external_name());
1589       FlagSetting fs1(VerifyBeforeGC, true);
1590       FlagSetting fs2(VerifyDuringGC, true);
1591       FlagSetting fs3(VerifyAfterGC,  true);
1592       Universe::heap()->collect(GCCause::_java_lang_system_gc);
1593     }
1594   }
1595 }
1596 
// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }

  if (!CDSConfig::is_using_aot_linked_classes()) {
    // Resolve (but do not initialize) every class referenced by the special subgraph.
    assert( _run_time_special_subgraph != nullptr, "must be");
    Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
    if (klasses != nullptr) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        ExceptionMark em(current); // no exception can happen here
        resolve_or_init(k, /*do_init*/false, current);
      }
    }
  }

  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
1624 
1625 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1626   for (int i = 0; fields[i].valid(); i++) {
1627     ArchivableStaticFieldInfo* info = &fields[i];
1628     TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1629     InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1630     assert(k != nullptr && k->is_shared_boot_class(), "sanity");
1631     resolve_classes_for_subgraph_of(current, k);
1632   }
1633 }
1634 
1635 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1636   JavaThread* THREAD = current;
1637   ExceptionMark em(THREAD);
1638   const ArchivedKlassSubGraphInfoRecord* record =
1639    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1640   if (HAS_PENDING_EXCEPTION) {

1969   oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
1970   PointsToOopsChecker points_to_oops_checker;
1971   obj->oop_iterate(&points_to_oops_checker);
1972   return CachedOopInfo(referrer, points_to_oops_checker.result());
1973 }
1974 
1975 void HeapShared::init_box_classes(TRAPS) {
1976   if (ArchiveHeapLoader::is_in_use()) {
1977     vmClasses::Boolean_klass()->initialize(CHECK);
1978     vmClasses::Character_klass()->initialize(CHECK);
1979     vmClasses::Float_klass()->initialize(CHECK);
1980     vmClasses::Double_klass()->initialize(CHECK);
1981     vmClasses::Byte_klass()->initialize(CHECK);
1982     vmClasses::Short_klass()->initialize(CHECK);
1983     vmClasses::Integer_klass()->initialize(CHECK);
1984     vmClasses::Long_klass()->initialize(CHECK);
1985     vmClasses::Void_klass()->initialize(CHECK);
1986   }
1987 }
1988 
1989 void HeapShared::exit_on_error() {
1990   if (_context != nullptr) {
1991     ResourceMark rm;
1992     LogStream ls(Log(cds, heap)::error());
1993     ls.print_cr("Context");
1994     for (int i = 0; i < _context->length(); i++) {
1995       const char* s = _context->at(i);
1996       ls.print_cr("- %s", s);
1997     }
1998   }
1999   if (_trace != nullptr) {
2000     ResourceMark rm;
2001     LogStream ls(Log(cds, heap)::error());
2002     ls.print_cr("Reference trace");
2003     for (int i = 0; i < _trace->length(); i++) {
2004       oop orig_obj = _trace->at(i);
2005       ls.print_cr("[%d] ========================================", i);
2006       orig_obj->print_on(&ls);
2007       ls.cr();
2008     }
2009   }
2010   MetaspaceShared::unrecoverable_writing_error();
2011 }
2012 
2013 // (1) If orig_obj has not been archived yet, archive it.
2014 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
2015 //     trace all  objects that are reachable from it, and make sure these objects are archived.
2016 // (3) Record the klasses of all orig_obj and all reachable objects.
2017 bool HeapShared::archive_reachable_objects_from(int level,
2018                                                 KlassSubGraphInfo* subgraph_info,
2019                                                 oop orig_obj) {
2020   ArchivingObjectMark mark(orig_obj);
2021   assert(orig_obj != nullptr, "must be");
2022 
2023   if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
2024     // This object has injected fields that cannot be supported easily, so we disallow them for now.
2025     // If you get an error here, you probably made a change in the JDK library that has added
2026     // these objects that are referenced (directly or indirectly) by static fields.
2027     ResourceMark rm;
2028     log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
2029     debug_trace();
2030     exit_on_error();
2031   }
2032 
2033   if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
2034     ResourceMark rm;
2035     LogTarget(Debug, cds, heap) log;
2036     LogStream out(log);
2037     out.print("Found java mirror " PTR_FORMAT " ", p2i(orig_obj));
2038     Klass* k = java_lang_Class::as_Klass(orig_obj);
2039     if (k != nullptr) {
2040       out.print("%s", k->external_name());
2041     } else {
2042       out.print("primitive");
2043     }
2044     out.print_cr("; scratch mirror = "  PTR_FORMAT,
2045                  p2i(scratch_java_mirror(orig_obj)));
2046   }
2047 
2048   if (CDSConfig::is_initing_classes_at_dump_time()) {
2049     if (java_lang_Class::is_instance(orig_obj)) {
2050       orig_obj = scratch_java_mirror(orig_obj);

2086 
2087   bool already_archived = has_been_archived(orig_obj);
2088   bool record_klasses_only = already_archived;
2089   if (!already_archived) {
2090     ++_num_new_archived_objs;
2091     if (!archive_object(orig_obj)) {
2092       // Skip archiving the sub-graph referenced from the current entry field.
2093       ResourceMark rm;
2094       log_error(cds, heap)(
2095         "Cannot archive the sub-graph referenced from %s object ("
2096         PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
2097         orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
2098       if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
2100         // as the Java code will take care of initializing this field dynamically.
2101         return false;
2102       } else {
2103         // We don't know how to handle an object that has been archived, but some of its reachable
2104         // objects cannot be archived. Bail out for now. We might need to fix this in the future if
2105         // we have a real use case.
2106         exit_on_error();
2107       }
2108     }
2109   }
2110 
2111   Klass *orig_k = orig_obj->klass();
2112   subgraph_info->add_subgraph_object_klass(orig_k);
2113 
2114   WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj);
2115   orig_obj->oop_iterate(&walker);
2116 
2117   if (CDSConfig::is_initing_classes_at_dump_time()) {
2118     // The enum klasses are archived with aot-initialized mirror.
2119     // See AOTClassInitializer::can_archive_initialized_mirror().
2120   } else {
2121     if (CDSEnumKlass::is_enum_obj(orig_obj)) {
2122       CDSEnumKlass::handle_enum_obj(level + 1, subgraph_info, orig_obj);
2123     }
2124   }
2125 
2126   return true;

2535 
// Archives the subgraphs rooted at every entry of the given entry-field table.
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the class of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  //
  // Note: the outer loop does NOT advance i; the inner loop does, consuming a
  // run of consecutive entries that share the same klass_name.
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    ContextMark cm(klass_name);
    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      // This inner ContextMark intentionally shadows the outer one, adding
      // the field name to the error-reporting context for this field only.
      ContextMark cm(f->field_name);
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records = %d",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  // Re-verify each recorded subgraph by walking it again from its static field.
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}
< prev index next >