< prev index next >

src/hotspot/share/cds/heapShared.cpp

Print this page

  46 #include "classfile/javaClasses.inline.hpp"
  47 #include "classfile/modules.hpp"
  48 #include "classfile/stringTable.hpp"
  49 #include "classfile/symbolTable.hpp"
  50 #include "classfile/systemDictionary.hpp"
  51 #include "classfile/systemDictionaryShared.hpp"
  52 #include "classfile/vmClasses.hpp"
  53 #include "classfile/vmSymbols.hpp"
  54 #include "gc/shared/collectedHeap.hpp"
  55 #include "gc/shared/gcLocker.hpp"
  56 #include "gc/shared/gcVMOperations.hpp"
  57 #include "logging/log.hpp"
  58 #include "logging/logStream.hpp"
  59 #include "memory/iterator.inline.hpp"
  60 #include "memory/resourceArea.hpp"
  61 #include "memory/universe.hpp"
  62 #include "oops/compressedOops.inline.hpp"
  63 #include "oops/fieldStreams.inline.hpp"
  64 #include "oops/objArrayOop.inline.hpp"
  65 #include "oops/oop.inline.hpp"

  66 #include "oops/oopHandle.inline.hpp"
  67 #include "oops/typeArrayOop.inline.hpp"
  68 #include "prims/jvmtiExport.hpp"
  69 #include "runtime/arguments.hpp"
  70 #include "runtime/fieldDescriptor.inline.hpp"
  71 #include "runtime/globals_extension.hpp"
  72 #include "runtime/init.hpp"
  73 #include "runtime/javaCalls.hpp"
  74 #include "runtime/mutexLocker.hpp"
  75 #include "runtime/safepointVerifiers.hpp"
  76 #include "utilities/bitMap.inline.hpp"
  77 #include "utilities/copy.hpp"
  78 #if INCLUDE_G1GC
  79 #include "gc/g1/g1CollectedHeap.hpp"
  80 #endif
  81 
  82 #if INCLUDE_CDS_JAVA_HEAP
  83 
  84 struct ArchivableStaticFieldInfo {
  85   const char* klass_name;

 415       AOTStreamedHeapLoader::finish_initialization(static_mapinfo);
 416     } else {
 417       // Finish up archived heap initialization. These must be
 418       // done after ReadClosure.
 419       AOTMappedHeapLoader::finish_initialization(static_mapinfo);
 420     }
 421   }
 422 }
 423 
 424 void HeapShared::make_archived_object_cache_gc_safe() {
 425   ArchivedObjectCache* new_cache = new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
 426 
 427   // It's safe to change the behavior of the hash function now, because iterate_all()
 428   // doesn't call the hash function.
 429   //  See archived_object_cache_hash() for more details.
 430   assert(_use_identity_hash_for_archived_object_cache == false, "happens only once");
 431   _use_identity_hash_for_archived_object_cache = true;
 432 
 433   // Copy all CachedOopInfo into a new table using a different hashing algorithm
 434   archived_object_cache()->iterate_all([&] (OopHandle oh, CachedOopInfo info) {
 435       new_cache->put_when_absent(oh, info);
 436     });

















 437 
 438   destroy_archived_object_cache();
 439   _archived_object_cache = new_cache;
 440 }
 441 
 442 HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
 443   OopHandle oh(Universe::vm_global(), obj);
 444   CachedOopInfo* result = _archived_object_cache->get(oh);
 445   oh.release(Universe::vm_global());
 446   return result;
 447 }
 448 
 449 bool HeapShared::has_been_archived(oop obj) {
 450   assert(CDSConfig::is_dumping_heap(), "dump-time only");
 451   return get_cached_oop_info(obj) != nullptr;
 452 }
 453 
 454 int HeapShared::append_root(oop obj) {
 455   assert(SafepointSynchronize::is_at_safepoint(), "sanity");
 456   assert(CDSConfig::is_dumping_heap(), "dump-time only");

 660     MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
 661     OopHandle* handle = get(ptr);
 662     if (handle != nullptr) {
 663       handle->release(Universe::vm_global());
 664       remove(ptr);
 665     }
 666   }
 667 };
 668 
 669 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
 670   if (CDSConfig::is_dumping_preimage_static_archive() && scratch_resolved_references(src) != nullptr) {
 671     // We are in AOT training run. The class has been redefined and we are giving it a new resolved_reference.
 672     // Ignore it, as this class will be excluded from the AOT config.
 673     return;
 674   }
 675   if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
 676     _scratch_objects_table->set_oop(src, dest);
 677   }
 678 }
 679 
 680 objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
 681   return (objArrayOop)_scratch_objects_table->get_oop(src);

 682 }
 683 
 684 void HeapShared::init_dumping() {
 685   _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
 686   _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
 687   _pending_roots->append(nullptr); // root index 0 represents a null oop
 688 }
 689 
 690 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
 691   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 692     BasicType bt = (BasicType)i;
 693     if (!is_reference_type(bt)) {
 694       oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
 695       _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
 696     }
 697   }
 698 }
 699 
 700 // Given java_mirror that represents a (primitive or reference) type T,
 701 // return the "scratch" version that represents the same type T. Note

 851     assert(success, "sanity");
 852   }
 853 
 854   if (log_is_enabled(Debug, aot, init)) {
 855     ResourceMark rm;
 856     log_debug(aot, init)("copied %3d field(s) in aot-initialized mirror %s%s%s", nfields, ik->external_name(),
 857                          ik->is_hidden() ? " (hidden)" : "",
 858                          ik->is_enum_subclass() ? " (enum)" : "");
 859   }
 860 }
 861 
 862 void HeapShared::copy_java_mirror(oop orig_mirror, oop scratch_m) {
 863   // We need to retain the identity_hash, because it may have been used by some hashtables
 864   // in the shared heap.
 865   if (!orig_mirror->fast_no_hash_check()) {
 866     intptr_t src_hash = orig_mirror->identity_hash();
 867     if (UseCompactObjectHeaders) {
 868       narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
 869       scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
 870     } else {

 871       scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
 872     }
 873     assert(scratch_m->mark().is_unlocked(), "sanity");
 874 
 875     DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
 876     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
 877   }
 878 



















 879   if (CDSConfig::is_dumping_aot_linked_classes()) {
 880     java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
 881     java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
 882   }
 883 }
 884 
 885 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
 886   if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
 887     objArrayOop rr = src_ik->constants()->resolved_references_or_null();
 888     if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
 889       return HeapShared::scratch_resolved_references(src_ik->constants());
 890     }
 891   }
 892   return nullptr;
 893 }
 894 
 895 int HeapShared::archive_exception_instance(oop exception) {
 896   bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
 897   assert(success, "sanity");
 898   return append_root(exception);

1073     // the original Klass*
1074     if (orig_k == vmClasses::String_klass() ||
1075         orig_k == vmClasses::Object_klass()) {
1076       // Initialized early during VM initialization. No need to be added
1077       // to the sub-graph object class list.
1078       return;
1079     }
1080     check_allowed_klass(InstanceKlass::cast(orig_k));
1081   } else if (orig_k->is_objArray_klass()) {
1082     Klass* abk = ObjArrayKlass::cast(orig_k)->bottom_klass();
1083     if (abk->is_instance_klass()) {
1084       assert(InstanceKlass::cast(abk)->defined_by_boot_loader(),
1085             "must be boot class");
1086       check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
1087     }
1088     if (orig_k == Universe::objectArrayKlass()) {
1089       // Initialized early during Universe::genesis. No need to be added
1090       // to the list.
1091       return;
1092     }



1093   } else {
1094     assert(orig_k->is_typeArray_klass(), "must be");
1095     // Primitive type arrays are created early during Universe::genesis.
1096     return;
1097   }
1098 
1099   if (log_is_enabled(Debug, aot, heap)) {
1100     if (!_subgraph_object_klasses->contains(orig_k)) {
1101       ResourceMark rm;
1102       log_debug(aot, heap)("Adding klass %s", orig_k->external_name());
1103     }
1104   }
1105 
1106   _subgraph_object_klasses->append_if_missing(orig_k);
1107 }
1108 
1109 void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
1110 #ifndef PRODUCT
1111   if (AOTClassInitializer::has_test_class()) {
1112     // The tests can cache arbitrary types of objects.

1447     _test_class = k;
1448     _test_class_record = record;
1449   }
1450 #endif
1451 
1452   // Initialize from archived data. Currently this is done only
1453   // during VM initialization time. No lock is needed.
1454   if (record == nullptr) {
1455     if (log_is_enabled(Info, aot, heap)) {
1456       ResourceMark rm(THREAD);
1457       log_info(aot, heap)("subgraph %s is not recorded",
1458                           k->external_name());
1459     }
1460     return nullptr;
1461   } else {
1462     if (log_is_enabled(Info, aot, heap)) {
1463       ResourceMark rm;
1464       log_info(aot, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
1465     }
1466 














1467     resolve_or_init(k, do_init, CHECK_NULL);
1468 
1469     // Load/link/initialize the klasses of the objects in the subgraph.
1470     // nullptr class loader is used.
1471     Array<Klass*>* klasses = record->subgraph_object_klasses();
1472     if (klasses != nullptr) {
1473       for (int i = 0; i < klasses->length(); i++) {
1474         Klass* klass = klasses->at(i);
1475         if (!klass->in_aot_cache()) {
1476           return nullptr;
1477         }
1478         resolve_or_init(klass, do_init, CHECK_NULL);
1479       }
1480     }
1481   }
1482 
1483   return record;
1484 }
1485 
1486 void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
1487   TempNewSymbol klass_name_sym =  SymbolTable::new_symbol(klass_name);
1488   InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
1489   if (k == nullptr) {
1490     return;
1491   }
1492   assert(k->defined_by_boot_loader(), "sanity");
1493   resolve_or_init(k, false, CHECK);
1494   if (do_init) {
1495     resolve_or_init(k, true, CHECK);
1496   }
1497 }
1498 
1499 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
1500   if (!do_init) {
1501     if (k->class_loader_data() == nullptr) {
1502       Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
1503       assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");




1504     }
1505   } else {
1506     assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
1507     if (k->is_instance_klass()) {
1508       InstanceKlass* ik = InstanceKlass::cast(k);
1509       ik->initialize(CHECK);
1510     } else if (k->is_objArray_klass()) {
1511       ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1512       oak->initialize(CHECK);
1513     }
1514   }
1515 }
1516 
1517 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1518   verify_the_heap(k, "before");
1519 
1520   Array<int>* entry_field_records = record->entry_field_records();
1521   if (entry_field_records != nullptr) {
1522     int efr_len = entry_field_records->length();
1523     assert(efr_len % 2 == 0, "sanity");

1950   }
1951 }
1952 #endif
1953 
1954 void HeapShared::check_special_subgraph_classes() {
1955   if (CDSConfig::is_dumping_aot_linked_classes()) {
1956     // We can have aot-initialized classes (such as Enums) that can reference objects
1957     // of arbitrary types. Currently, we trust the JEP 483 implementation to only
1958     // aot-initialize classes that are "safe".
1959     //
1960     // TODO: we need an automatic tool that checks the safety of aot-initialized
1961     // classes (when we extend the set of aot-initialized classes beyond JEP 483)
1962     return;
1963   } else {
1964     // In this case, the special subgraph should contain a few specific types
1965     GrowableArray<Klass*>* klasses = _dump_time_special_subgraph->subgraph_object_klasses();
1966     int num = klasses->length();
1967     for (int i = 0; i < num; i++) {
1968       Klass* subgraph_k = klasses->at(i);
1969       Symbol* name = subgraph_k->name();
1970       if (subgraph_k->is_instance_klass() &&

1971           name != vmSymbols::java_lang_Class() &&
1972           name != vmSymbols::java_lang_String() &&
1973           name != vmSymbols::java_lang_ArithmeticException() &&
1974           name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
1975           name != vmSymbols::java_lang_ArrayStoreException() &&
1976           name != vmSymbols::java_lang_ClassCastException() &&
1977           name != vmSymbols::java_lang_InternalError() &&
1978           name != vmSymbols::java_lang_NullPointerException() &&
1979           name != vmSymbols::jdk_internal_vm_PreemptedException()) {
1980         ResourceMark rm;
1981         fatal("special subgraph cannot have objects of type %s", subgraph_k->external_name());
1982       }
1983     }
1984   }
1985 }
1986 
1987 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
1988 HeapShared::PendingOop HeapShared::_object_being_archived;
1989 size_t HeapShared::_num_new_walked_objs;
1990 size_t HeapShared::_num_new_archived_objs;

  46 #include "classfile/javaClasses.inline.hpp"
  47 #include "classfile/modules.hpp"
  48 #include "classfile/stringTable.hpp"
  49 #include "classfile/symbolTable.hpp"
  50 #include "classfile/systemDictionary.hpp"
  51 #include "classfile/systemDictionaryShared.hpp"
  52 #include "classfile/vmClasses.hpp"
  53 #include "classfile/vmSymbols.hpp"
  54 #include "gc/shared/collectedHeap.hpp"
  55 #include "gc/shared/gcLocker.hpp"
  56 #include "gc/shared/gcVMOperations.hpp"
  57 #include "logging/log.hpp"
  58 #include "logging/logStream.hpp"
  59 #include "memory/iterator.inline.hpp"
  60 #include "memory/resourceArea.hpp"
  61 #include "memory/universe.hpp"
  62 #include "oops/compressedOops.inline.hpp"
  63 #include "oops/fieldStreams.inline.hpp"
  64 #include "oops/objArrayOop.inline.hpp"
  65 #include "oops/oop.inline.hpp"
  66 #include "oops/oopCast.inline.hpp"
  67 #include "oops/oopHandle.inline.hpp"
  68 #include "oops/typeArrayOop.inline.hpp"
  69 #include "prims/jvmtiExport.hpp"
  70 #include "runtime/arguments.hpp"
  71 #include "runtime/fieldDescriptor.inline.hpp"
  72 #include "runtime/globals_extension.hpp"
  73 #include "runtime/init.hpp"
  74 #include "runtime/javaCalls.hpp"
  75 #include "runtime/mutexLocker.hpp"
  76 #include "runtime/safepointVerifiers.hpp"
  77 #include "utilities/bitMap.inline.hpp"
  78 #include "utilities/copy.hpp"
  79 #if INCLUDE_G1GC
  80 #include "gc/g1/g1CollectedHeap.hpp"
  81 #endif
  82 
  83 #if INCLUDE_CDS_JAVA_HEAP
  84 
  85 struct ArchivableStaticFieldInfo {
  86   const char* klass_name;

 416       AOTStreamedHeapLoader::finish_initialization(static_mapinfo);
 417     } else {
 418       // Finish up archived heap initialization. These must be
 419       // done after ReadClosure.
 420       AOTMappedHeapLoader::finish_initialization(static_mapinfo);
 421     }
 422   }
 423 }
 424 
  425 void HeapShared::make_archived_object_cache_gc_safe() {
        // Rebuild _archived_object_cache into a new table whose hash function is
        // stable across GC (identity hash instead of the raw oop address), so the
        // cache can still be queried after the heap objects have been moved.
  426   ArchivedObjectCache* new_cache = new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
  427 
  428   // It's safe to change the behavior of the hash function now, because iterate_all()
  429   // doesn't call the hash function.
  430   //  See archived_object_cache_hash() for more details.
  431   assert(_use_identity_hash_for_archived_object_cache == false, "happens only once");
  432   _use_identity_hash_for_archived_object_cache = true;
  433 
  434   // Copy all CachedOopInfo into a new table using a different hashing algorithm
  435   archived_object_cache()->iterate_all([&] (OopHandle oh, CachedOopInfo info) {
  436       if (Arguments::is_valhalla_enabled() && oh.resolve()->klass()->is_inline_klass()) {
  437         // After make_archived_object_cache_gc_safe() returns,
  438         // _archived_object_cache->get() is called only from the (future) AOT code
  439         // compiler to access heap oops referenced by AOT-compiled method.
  440         //
  441         // As planned in JDK 27 (JDK-8335368), AOT-compiled methods will only reference
  442         // oops that are Strings, mirrors, or exceptions, all of which are not value
  443         // objects.
  444         //
  445         // We exclude value objects from new_cache, as we don't know how to track them
  446         // after the GC moves them. This should be fixed when AOT-compiled methods
  447         // need to reference value objects.
  448         //
  449         // Also TODO: the AOT heap should de-duplicate value objects with identical
  450         // values.
  451       } else {
  452         new_cache->put_when_absent(oh, info);
  453       }
  454   });
  455 
        // Dispose of the old table (and its handles) and publish the GC-safe one.
  456   destroy_archived_object_cache();
  457   _archived_object_cache = new_cache;
  458 }
 459 
  460 HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
        // The cache is keyed by OopHandle, so wrap obj in a temporary handle for
        // the lookup and release it before returning. The returned pointer aims
        // into the table entry itself, not into the temporary handle.
  461   OopHandle oh(Universe::vm_global(), obj);
  462   CachedOopInfo* result = _archived_object_cache->get(oh);
  463   oh.release(Universe::vm_global());
  464   return result;
  465 }
 466 
  467 // Returns true if obj has already been copied into the archived heap
  468 // (i.e., it has an entry in _archived_object_cache). Dump-time only.
        bool HeapShared::has_been_archived(oop obj) {
          assert(CDSConfig::is_dumping_heap(), "dump-time only");
  469   return get_cached_oop_info(obj) != nullptr;
  470 }
 471 
 472 int HeapShared::append_root(oop obj) {
 473   assert(SafepointSynchronize::is_at_safepoint(), "sanity");
 474   assert(CDSConfig::is_dumping_heap(), "dump-time only");

 678     MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
 679     OopHandle* handle = get(ptr);
 680     if (handle != nullptr) {
 681       handle->release(Universe::vm_global());
 682       remove(ptr);
 683     }
 684   }
 685 };
 686 
  687 // Records dest as the "scratch" (archivable) copy of src's resolved_references
  688 // array. Only constant pools of classes defined by the builtin loaders are
  689 // recorded; others are ignored.
        void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
          if (CDSConfig::is_dumping_preimage_static_archive() && scratch_resolved_references(src) != nullptr) {
  690     // We are in AOT training run. The class has been redefined and we are giving it a new resolved_reference.
  691     // Ignore it, as this class will be excluded from the AOT config.
  692     return;
  693   }
  694   if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
  695     _scratch_objects_table->set_oop(src, dest);
  696   }
  697 }
 697 
  698 // Returns the scratch copy of src's resolved_references previously recorded by
  699 // add_scratch_resolved_references(), or nullptr if none was recorded.
  700 // oop_cast<refArrayOop> is a checked cast; the stored oop is always a
  701 // reference array (see add_scratch_resolved_references).
        refArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
          oop rr = _scratch_objects_table->get_oop(src);
          return rr == nullptr ? nullptr : oop_cast<refArrayOop>(rr);
        }
 702 
  703 // One-time setup for heap dumping: allocates the scratch-object table and the
  704 // pending-roots array. Slot 0 of the roots array is reserved for the null oop.
        void HeapShared::init_dumping() {
  705   _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  706   _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  707   _pending_roots->append(nullptr); // root index 0 represents a null oop
  708 }
 708 
  709 // Creates a fresh ("scratch") java.lang.Class mirror for each primitive
  710 // BasicType (T_BOOLEAN .. T_VOID), holding each in a vm_global OopHandle so
  711 // they survive GC until dumping is done. Reference types are skipped here;
  712 // their scratch mirrors are handled separately.
        void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
          for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
  713     BasicType bt = (BasicType)i;
  714     if (!is_reference_type(bt)) {
  715       oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
  716       _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
  717     }
  718   }
  719 }
 718 
 719 // Given java_mirror that represents a (primitive or reference) type T,
 720 // return the "scratch" version that represents the same type T. Note

 870     assert(success, "sanity");
 871   }
 872 
 873   if (log_is_enabled(Debug, aot, init)) {
 874     ResourceMark rm;
 875     log_debug(aot, init)("copied %3d field(s) in aot-initialized mirror %s%s%s", nfields, ik->external_name(),
 876                          ik->is_hidden() ? " (hidden)" : "",
 877                          ik->is_enum_subclass() ? " (enum)" : "");
 878   }
 879 }
 880 
  881 // Copies selected state from the original mirror into its scratch (archived)
  882 // copy: the identity hash in the mark word, Valhalla-specific mirror fields,
  883 // and (when dumping aot-linked classes) the module and protection domain.
        void HeapShared::copy_java_mirror(oop orig_mirror, oop scratch_m) {
          // We need to retain the identity_hash, because it may have been used by some hashtables
          // in the shared heap.
  884   if (!orig_mirror->fast_no_hash_check()) {
  885     intptr_t src_hash = orig_mirror->identity_hash();
  886     if (UseCompactObjectHeaders) {
            // With compact headers the narrowKlass lives in the mark word, so it
            // must be re-encoded alongside the copied hash.
  887       narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
  888       scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
  889     } else {
  890       // For valhalla, the prototype header is the same as markWord::prototype();
  891       scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
  892     }
  893     assert(scratch_m->mark().is_unlocked(), "sanity");
  894 
  895     DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
  896     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  897   }
  898 
        // Valhalla: carry over mirror fields that are installed during class
        // initialization and would otherwise be missing from the scratch copy.
  899   Klass* k = java_lang_Class::as_Klass(orig_mirror);
  900   if (k != nullptr && k->is_instance_klass()) {
  901     InstanceKlass* ik = InstanceKlass::cast(k);
  902 
  903     if (ik->is_inline_klass() && ik->is_initialized()) {
  904       // Only concrete value classes need the null_reset field
  905       InlineKlass* ilk = InlineKlass::cast(k);
  906       if (ilk->supports_nullable_layouts()) {
  907         scratch_m->obj_field_put(ilk->null_reset_value_offset(), ilk->null_reset_value());
  908       }
  909     }
  910 
            // NOTE(review): presumably the acmp maps support value-object
            // substitutability checks (==) — confirm against InstanceKlass.
  911     if (ik->has_acmp_maps_offset()) {
  912       int maps_offset = ik->acmp_maps_offset();
  913       oop maps = orig_mirror->obj_field(maps_offset);
  914       scratch_m->obj_field_put(maps_offset, maps);
  915     }
  916   }
  917 
  918   if (CDSConfig::is_dumping_aot_linked_classes()) {
  919     java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
  920     java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
  921   }
  922 }
 923 
  924 // Returns the scratch (archivable) resolved_references for src_ik, or nullptr
  925 // if the class is not defined by a builtin loader, has no resolved references,
  926 // or the array is too large to archive. Note: the size check is done on the
  927 // runtime array rr, but the value returned is the scratch copy.
        static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
          if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
  928     objArrayOop rr = src_ik->constants()->resolved_references_or_null();
  929     if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
  930       return HeapShared::scratch_resolved_references(src_ik->constants());
  931     }
  932   }
  933   return nullptr;
  934 }
 933 
 934 int HeapShared::archive_exception_instance(oop exception) {
 935   bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
 936   assert(success, "sanity");
 937   return append_root(exception);

1112     // the original Klass*
1113     if (orig_k == vmClasses::String_klass() ||
1114         orig_k == vmClasses::Object_klass()) {
1115       // Initialized early during VM initialization. No need to be added
1116       // to the sub-graph object class list.
1117       return;
1118     }
1119     check_allowed_klass(InstanceKlass::cast(orig_k));
1120   } else if (orig_k->is_objArray_klass()) {
1121     Klass* abk = ObjArrayKlass::cast(orig_k)->bottom_klass();
1122     if (abk->is_instance_klass()) {
1123       assert(InstanceKlass::cast(abk)->defined_by_boot_loader(),
1124             "must be boot class");
1125       check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
1126     }
1127     if (orig_k == Universe::objectArrayKlass()) {
1128       // Initialized early during Universe::genesis. No need to be added
1129       // to the list.
1130       return;
1131     }
1132     if (orig_k->is_flatArray_klass()) {
1133       _subgraph_object_klasses->append_if_missing(FlatArrayKlass::cast(orig_k)->element_klass());
1134     }
1135   } else {
1136     assert(orig_k->is_typeArray_klass(), "must be");
1137     // Primitive type arrays are created early during Universe::genesis.
1138     return;
1139   }
1140 
1141   if (log_is_enabled(Debug, aot, heap)) {
1142     if (!_subgraph_object_klasses->contains(orig_k)) {
1143       ResourceMark rm;
1144       log_debug(aot, heap)("Adding klass %s", orig_k->external_name());
1145     }
1146   }
1147 
1148   _subgraph_object_klasses->append_if_missing(orig_k);
1149 }
1150 
1151 void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
1152 #ifndef PRODUCT
1153   if (AOTClassInitializer::has_test_class()) {
1154     // The tests can cache arbitrary types of objects.

1489     _test_class = k;
1490     _test_class_record = record;
1491   }
1492 #endif
1493 
1494   // Initialize from archived data. Currently this is done only
1495   // during VM initialization time. No lock is needed.
1496   if (record == nullptr) {
1497     if (log_is_enabled(Info, aot, heap)) {
1498       ResourceMark rm(THREAD);
1499       log_info(aot, heap)("subgraph %s is not recorded",
1500                           k->external_name());
1501     }
1502     return nullptr;
1503   } else {
1504     if (log_is_enabled(Info, aot, heap)) {
1505       ResourceMark rm;
1506       log_info(aot, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
1507     }
1508 
1509     Array<Klass*>* klasses = record->subgraph_object_klasses();
1510 
1511     if (do_init && klasses != nullptr) {
1512       // All the classes of the oops in this subgraph are in the klasses array.
1513       // Link them first in case any of the oops are used in the <clinit> methods
1514       // invoked in the rest of this function.
1515       for (int i = 0; i < klasses->length(); i++) {
1516         Klass* klass = klasses->at(i);
1517         if (klass->in_aot_cache() && klass->is_instance_klass()) {
1518           InstanceKlass::cast(klass)->link_class(CHECK_NULL);
1519         }
1520       }
1521     }
1522 
1523     resolve_or_init(k, do_init, CHECK_NULL);
1524 
1525     // Load/link/initialize the klasses of the objects in the subgraph.
1526     // nullptr class loader is used.

1527     if (klasses != nullptr) {
1528       for (int i = 0; i < klasses->length(); i++) {
1529         Klass* klass = klasses->at(i);
1530         if (!klass->in_aot_cache()) {
1531           return nullptr;
1532         }
1533         resolve_or_init(klass, do_init, CHECK_NULL);
1534       }
1535     }
1536   }
1537 
1538   return record;
1539 }
1540 
 1541 // Name-based convenience overload: looks up the builtin-loader class by name
 1542 // and resolves it; if do_init is requested, resolves first and then
 1543 // initializes in a second pass. Silently returns if the class is not found.
       void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
         TempNewSymbol klass_name_sym =  SymbolTable::new_symbol(klass_name);
 1544   InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
 1545   if (k == nullptr) {
 1546     return;
 1547   }
 1548   assert(k->defined_by_boot_loader(), "sanity");
        // Resolve first (do_init == false), then initialize if requested.
 1549   resolve_or_init(k, false, CHECK);
 1550   if (do_init) {
 1551     resolve_or_init(k, true, CHECK);
 1552   }
 1553 }
1553 
 1554 // Resolves (do_init == false) or initializes (do_init == true) klass k on
 1555 // behalf of an archived heap subgraph. Resolution verifies that the class
 1556 // found by name is the archived one (i.e., not replaced by a JVMTI
 1557 // ClassFileLoadHook); initialization runs <clinit> for instance and
 1558 // object-array klasses.
       void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
         if (!do_init) {
 1556     if (k->class_loader_data() == nullptr) {
 1557       Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
            // NOTE(review): for array klasses the check also accepts k->super() —
            // presumably a Valhalla accommodation where resolving the array name
            // can yield a related array klass rather than k itself; confirm.
 1558       if (resolved_k->is_array_klass()) {
 1559         assert(resolved_k == k || resolved_k == k->super(), "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
 1560       } else {
 1561         assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
 1562       }
 1563     }
 1564   } else {
 1565     assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
 1566     if (k->is_instance_klass()) {
 1567       InstanceKlass* ik = InstanceKlass::cast(k);
 1568       ik->initialize(CHECK);
 1569     } else if (k->is_objArray_klass()) {
 1570       ObjArrayKlass* oak = ObjArrayKlass::cast(k);
 1571       oak->initialize(CHECK);
 1572     }
 1573   }
 1574 }
1575 
1576 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1577   verify_the_heap(k, "before");
1578 
1579   Array<int>* entry_field_records = record->entry_field_records();
1580   if (entry_field_records != nullptr) {
1581     int efr_len = entry_field_records->length();
1582     assert(efr_len % 2 == 0, "sanity");

2009   }
2010 }
2011 #endif
2012 
 2013 // Dump-time sanity check: unless aot-linked classes are being dumped, the
 2014 // special subgraph may only contain objects of a small allowlisted set of
 2015 // types. A violation is a fatal error (dump-time bug).
       void HeapShared::check_special_subgraph_classes() {
         if (CDSConfig::is_dumping_aot_linked_classes()) {
 2015     // We can have aot-initialized classes (such as Enums) that can reference objects
 2016     // of arbitrary types. Currently, we trust the JEP 483 implementation to only
 2017     // aot-initialize classes that are "safe".
 2018     //
 2019     // TODO: we need an automatic tool that checks the safety of aot-initialized
 2020     // classes (when we extend the set of aot-initialized classes beyond JEP 483)
 2021     return;
 2022   } else {
 2023     // In this case, the special subgraph should contain a few specific types
 2024     GrowableArray<Klass*>* klasses = _dump_time_special_subgraph->subgraph_object_klasses();
 2025     int num = klasses->length();
 2026     for (int i = 0; i < num; i++) {
 2027       Klass* subgraph_k = klasses->at(i);
 2028       Symbol* name = subgraph_k->name();
 2029 
          // NOTE(review): only identity classes are checked against the allowlist
          // (previously is_instance_klass()); presumably value classes are
          // permitted in the special subgraph under Valhalla — confirm.
 2030       if (subgraph_k->is_identity_class() &&
 2031           name != vmSymbols::java_lang_Class() &&
 2032           name != vmSymbols::java_lang_String() &&
 2033           name != vmSymbols::java_lang_ArithmeticException() &&
 2034           name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
 2035           name != vmSymbols::java_lang_ArrayStoreException() &&
 2036           name != vmSymbols::java_lang_ClassCastException() &&
 2037           name != vmSymbols::java_lang_InternalError() &&
 2038           name != vmSymbols::java_lang_NullPointerException() &&
 2039           name != vmSymbols::jdk_internal_vm_PreemptedException()) {
 2040         ResourceMark rm;
 2041         fatal("special subgraph cannot have objects of type %s", subgraph_k->external_name());
 2042       }
 2043     }
 2044   }
 2045 }
2046 
2047 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
2048 HeapShared::PendingOop HeapShared::_object_being_archived;
2049 size_t HeapShared::_num_new_walked_objs;
2050 size_t HeapShared::_num_new_archived_objs;
< prev index next >