< prev index next >

src/hotspot/share/cds/heapShared.cpp

Print this page

  46 #include "classfile/javaClasses.inline.hpp"
  47 #include "classfile/modules.hpp"
  48 #include "classfile/stringTable.hpp"
  49 #include "classfile/symbolTable.hpp"
  50 #include "classfile/systemDictionary.hpp"
  51 #include "classfile/systemDictionaryShared.hpp"
  52 #include "classfile/vmClasses.hpp"
  53 #include "classfile/vmSymbols.hpp"
  54 #include "gc/shared/collectedHeap.hpp"
  55 #include "gc/shared/gcLocker.hpp"
  56 #include "gc/shared/gcVMOperations.hpp"
  57 #include "logging/log.hpp"
  58 #include "logging/logStream.hpp"
  59 #include "memory/iterator.inline.hpp"
  60 #include "memory/resourceArea.hpp"
  61 #include "memory/universe.hpp"
  62 #include "oops/compressedOops.inline.hpp"
  63 #include "oops/fieldStreams.inline.hpp"
  64 #include "oops/objArrayOop.inline.hpp"
  65 #include "oops/oop.inline.hpp"

  66 #include "oops/oopHandle.inline.hpp"
  67 #include "oops/typeArrayOop.inline.hpp"
  68 #include "prims/jvmtiExport.hpp"
  69 #include "runtime/arguments.hpp"
  70 #include "runtime/fieldDescriptor.inline.hpp"
  71 #include "runtime/globals_extension.hpp"
  72 #include "runtime/init.hpp"
  73 #include "runtime/javaCalls.hpp"
  74 #include "runtime/mutexLocker.hpp"
  75 #include "runtime/safepointVerifiers.hpp"
  76 #include "utilities/bitMap.inline.hpp"
  77 #include "utilities/copy.hpp"
  78 #if INCLUDE_G1GC
  79 #include "gc/g1/g1CollectedHeap.hpp"
  80 #endif
  81 
  82 #if INCLUDE_CDS_JAVA_HEAP
  83 
  84 struct ArchivableStaticFieldInfo {
  85   const char* klass_name;

 435       AOTStreamedHeapLoader::finish_initialization(static_mapinfo);
 436     } else {
 437       // Finish up archived heap initialization. These must be
 438       // done after ReadClosure.
 439       AOTMappedHeapLoader::finish_initialization(static_mapinfo);
 440     }
 441   }
 442 }
 443 
  444 void HeapShared::make_archived_object_cache_gc_safe() {
        // Rebuild _archived_object_cache under the identity-hash scheme (see
        // archived_object_cache_hash()) so lookups stay valid from here on.
        // Runs exactly once, enforced by the assert below.
  445   ArchivedObjectCache* new_cache = new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
  446
  447   // It's safe to change the behavior of the hash function now, because iterate_all()
  448   // doesn't call the hash function.
  449   //  See archived_object_cache_hash() for more details.
  450   assert(_use_identity_hash_for_archived_object_cache == false, "happens only once");
  451   _use_identity_hash_for_archived_object_cache = true;
  452
  453   // Copy all CachedOopInfo into a new table using a different hashing algorithm
  454   archived_object_cache()->iterate_all([&] (OopHandle oh, CachedOopInfo info) {
  455       new_cache->put_when_absent(oh, info);
  456     });

















  457
        // Swap the rebuilt table in; the old table (and its hashing) is discarded.
  458   destroy_archived_object_cache();
  459   _archived_object_cache = new_cache;
  460 }
 461 
  462 HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
        // The cache is keyed by OopHandle, so wrap obj in a temporary global
        // handle just for the lookup, then release it to avoid leaking a slot.
  463   OopHandle oh(Universe::vm_global(), obj);
  464   CachedOopInfo* result = _archived_object_cache->get(oh);
  465   oh.release(Universe::vm_global());
  466   return result;
  467 }
 468 
  469 bool HeapShared::has_been_archived(oop obj) {
        // True iff obj already has an entry in the archived-object cache.
  470   assert(CDSConfig::is_dumping_heap(), "dump-time only");
  471   return get_cached_oop_info(obj) != nullptr;
  472 }
 473 
 474 int HeapShared::append_root(oop obj) {
 475   assert(SafepointSynchronize::is_at_safepoint(), "sanity");
 476   assert(CDSConfig::is_dumping_heap(), "dump-time only");

 677     MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
 678     OopHandle handle(Universe::vm_global(), o);
 679     put_when_absent(ptr, handle);
 680   }
 681   void remove_oop(MetaspaceObj* ptr) {
 682     MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
 683     OopHandle* handle = get(ptr);
 684     if (handle != nullptr) {
 685       handle->release(Universe::vm_global());
 686       remove(ptr);
 687     }
 688   }
 689 };
 690 
  691 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
        // Record the scratch copy of src's resolved_references array. Only
        // constant pools of builtin-loader classes are archived; others are
        // silently ignored.
  692   if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
  693     _scratch_objects_table->set_oop(src, dest);
  694   }
  695 }
 696 
  697 objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
        // Returns the scratch copy registered via add_scratch_resolved_references(),
        // or nullptr if none was recorded for this pool.
  698   return (objArrayOop)_scratch_objects_table->get_oop(src);

  699 }
 700 
  701 void HeapShared::remove_scratch_resolved_references(ConstantPool* src) {
        // Drop the scratch entry for src (releases the underlying OopHandle;
        // see MetaspaceObjToOopHandleTable::remove_oop). No-op outside a
        // heap-dumping session.
  702   if (CDSConfig::is_dumping_heap()) {
  703     _scratch_objects_table->remove_oop(src);
  704   }
  705 }
 706 
  707 void HeapShared::init_dumping() {
        // Set up dump-time tables: the scratch-object map, the pending-roots
        // array (slot 0 reserved for null), and (debug only) the set of
        // classes with cached oops.
  708   _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  709   _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  710   _pending_roots->append(nullptr); // root index 0 represents a null oop
  711   DEBUG_ONLY(_dumptime_classes_with_cached_oops = new (mtClassShared)ArchivableKlassTable());
  712 }
 713 
 714 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
 715   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 716     BasicType bt = (BasicType)i;
 717     if (!is_reference_type(bt)) {
 718       oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);

 875     assert(success, "sanity");
 876   }
 877 
 878   if (log_is_enabled(Debug, aot, init)) {
 879     ResourceMark rm;
 880     log_debug(aot, init)("copied %3d field(s) in aot-initialized mirror %s%s%s", nfields, ik->external_name(),
 881                          ik->is_hidden() ? " (hidden)" : "",
 882                          ik->is_enum_subclass() ? " (enum)" : "");
 883   }
 884 }
 885 
  886 void HeapShared::copy_java_mirror(oop orig_mirror, oop scratch_m) {
        // Copy dump-relevant state from the runtime mirror into its scratch
        // copy: the identity hash (plus narrow-klass bits under compact
        // headers), and module/protection_domain when aot-linking classes.
  887   // We need to retain the identity_hash, because it may have been used by some hashtables
  888   // in the shared heap.
  889   if (!orig_mirror->fast_no_hash_check()) {
  890     intptr_t src_hash = orig_mirror->identity_hash();
  891     if (UseCompactObjectHeaders) {
            // With compact headers the klass is encoded in the mark word, so
            // it must be preserved alongside the hash.
  892       narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
  893       scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
  894     } else {

  895       scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
  896     }
  897     assert(scratch_m->mark().is_unlocked(), "sanity");
  898
  899     DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
  900     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  901   }
  902



















  903   if (CDSConfig::is_dumping_aot_linked_classes()) {
  904     java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
  905     java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
  906   }
  907 }
 908 
  909 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
        // Returns the scratch (to-be-archived) resolved_references array for
        // src_ik, or nullptr if the class is not builtin-loaded, has no
        // resolved references, or the array is too large to archive. Note the
        // runtime array 'rr' is only used for the existence/size checks; the
        // returned oop comes from the scratch table.
  910   if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
  911     objArrayOop rr = src_ik->constants()->resolved_references_or_null();
  912     if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
  913       return HeapShared::scratch_resolved_references(src_ik->constants());
  914     }
  915   }
  916   return nullptr;
  917 }
 918 
 919 int HeapShared::archive_exception_instance(oop exception) {
 920   bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
 921   assert(success, "sanity");
 922   return append_root(exception);

1099     // the original Klass*
1100     if (orig_k == vmClasses::String_klass() ||
1101         orig_k == vmClasses::Object_klass()) {
1102       // Initialized early during VM initialization. No need to be added
1103       // to the sub-graph object class list.
1104       return;
1105     }
1106     check_allowed_klass(InstanceKlass::cast(orig_k));
1107   } else if (orig_k->is_objArray_klass()) {
1108     Klass* abk = ObjArrayKlass::cast(orig_k)->bottom_klass();
1109     if (abk->is_instance_klass()) {
1110       assert(InstanceKlass::cast(abk)->defined_by_boot_loader(),
1111             "must be boot class");
1112       check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
1113     }
1114     if (orig_k == Universe::objectArrayKlass()) {
1115       // Initialized early during Universe::genesis. No need to be added
1116       // to the list.
1117       return;
1118     }



1119   } else {
1120     assert(orig_k->is_typeArray_klass(), "must be");
1121     // Primitive type arrays are created early during Universe::genesis.
1122     return;
1123   }
1124 
1125   if (log_is_enabled(Debug, aot, heap)) {
1126     if (!_subgraph_object_klasses->contains(orig_k)) {
1127       ResourceMark rm;
1128       log_debug(aot, heap)("Adding klass %s", orig_k->external_name());
1129     }
1130   }
1131 
1132   _subgraph_object_klasses->append_if_missing(orig_k);
1133 }
1134 
1135 void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
1136 #ifndef PRODUCT
1137   if (AOTClassInitializer::has_test_class()) {
1138     // The tests can cache arbitrary types of objects.

1474     _test_class = k;
1475     _test_class_record = record;
1476   }
1477 #endif
1478 
1479   // Initialize from archived data. Currently this is done only
1480   // during VM initialization time. No lock is needed.
1481   if (record == nullptr) {
1482     if (log_is_enabled(Info, aot, heap)) {
1483       ResourceMark rm(THREAD);
1484       log_info(aot, heap)("subgraph %s is not recorded",
1485                           k->external_name());
1486     }
1487     return nullptr;
1488   } else {
1489     if (log_is_enabled(Info, aot, heap)) {
1490       ResourceMark rm;
1491       log_info(aot, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
1492     }
1493 














1494     resolve_or_init(k, do_init, CHECK_NULL);
1495 
1496     // Load/link/initialize the klasses of the objects in the subgraph.
1497     // nullptr class loader is used.
1498     Array<Klass*>* klasses = record->subgraph_object_klasses();
1499     if (klasses != nullptr) {
1500       for (int i = 0; i < klasses->length(); i++) {
1501         Klass* klass = klasses->at(i);
1502         if (!klass->in_aot_cache()) {
1503           return nullptr;
1504         }
1505         resolve_or_init(klass, do_init, CHECK_NULL);
1506       }
1507     }
1508   }
1509 
1510   return record;
1511 }
1512 
 1513 void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
        // Name-based convenience wrapper: look up a builtin (boot-loader)
        // class and resolve it; optionally also run its initialization.
        // A class that is not found is silently skipped.
 1514   TempNewSymbol klass_name_sym =  SymbolTable::new_symbol(klass_name);
 1515   InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
 1516   if (k == nullptr) {
 1517     return;
 1518   }
 1519   assert(k->defined_by_boot_loader(), "sanity");
        // Always do the resolve pass first; the init pass requires it.
 1520   resolve_or_init(k, false, CHECK);
 1521   if (do_init) {
 1522     resolve_or_init(k, true, CHECK);
 1523   }
 1524 }
1525 
 1526 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
 1527   if (!do_init) {
          // Resolve pass: a nullptr CLD means k has not yet been installed in
          // the system dictionary.
 1528     if (k->class_loader_data() == nullptr) {
 1529       Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
            // Re-resolution must hand back the archived Klass itself;
            // anything else means the class bytes were replaced.
 1530       assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");




 1531     }
 1532   } else {
          // Init pass: instance and object-array klasses run initialization;
          // other kinds fall through (presumably nothing to initialize —
          // TODO confirm for the remaining klass kinds).
 1533     assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
 1534     if (k->is_instance_klass()) {
 1535       InstanceKlass* ik = InstanceKlass::cast(k);
 1536       ik->initialize(CHECK);
 1537     } else if (k->is_objArray_klass()) {
 1538       ObjArrayKlass* oak = ObjArrayKlass::cast(k);
 1539       oak->initialize(CHECK);
 1540     }
 1541   }
 1542 }
1543 
1544 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1545   verify_the_heap(k, "before");
1546 
1547   Array<int>* entry_field_records = record->entry_field_records();
1548   if (entry_field_records != nullptr) {
1549     int efr_len = entry_field_records->length();
1550     assert(efr_len % 2 == 0, "sanity");

1977   }
1978 }
1979 #endif
1980 
 1981 void HeapShared::check_special_subgraph_classes() {
        // Dump-time sanity check: unless aot-linked classes are being dumped
        // (which may legitimately pull in arbitrary types), every instance
        // klass in the special subgraph must be one of the whitelisted types
        // below; anything else is a fatal error.
 1982   if (CDSConfig::is_dumping_aot_linked_classes()) {
 1983     // We can have aot-initialized classes (such as Enums) that can reference objects
 1984     // of arbitrary types. Currently, we trust the JEP 483 implementation to only
 1985     // aot-initialize classes that are "safe".
 1986     //
 1987     // TODO: we need an automatic tool that checks the safety of aot-initialized
 1988     // classes (when we extend the set of aot-initialized classes beyond JEP 483)
 1989     return;
 1990   } else {
 1991     // In this case, the special subgraph should contain a few specific types
 1992     GrowableArray<Klass*>* klasses = _dump_time_special_subgraph->subgraph_object_klasses();
 1993     int num = klasses->length();
 1994     for (int i = 0; i < num; i++) {
 1995       Klass* subgraph_k = klasses->at(i);
 1996       Symbol* name = subgraph_k->name();
            // Array klasses are not checked — only instance klasses are
            // restricted to the whitelist.
 1997       if (subgraph_k->is_instance_klass() &&

 1998           name != vmSymbols::java_lang_Class() &&
 1999           name != vmSymbols::java_lang_String() &&
 2000           name != vmSymbols::java_lang_ArithmeticException() &&
 2001           name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
 2002           name != vmSymbols::java_lang_ArrayStoreException() &&
 2003           name != vmSymbols::java_lang_ClassCastException() &&
 2004           name != vmSymbols::java_lang_InternalError() &&
 2005           name != vmSymbols::java_lang_NullPointerException() &&
 2006           name != vmSymbols::jdk_internal_vm_PreemptedException()) {
 2007         ResourceMark rm;
 2008         fatal("special subgraph cannot have objects of type %s", subgraph_k->external_name());
 2009       }
 2010     }
 2011   }
 2012 }
2013 
 // Out-of-class definitions of HeapShared static data members (dump-time
 // traversal state and statistics).
 2014 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
 2015 HeapShared::PendingOop HeapShared::_object_being_archived;
 2016 size_t HeapShared::_num_new_walked_objs;
 2017 size_t HeapShared::_num_new_archived_objs;

  46 #include "classfile/javaClasses.inline.hpp"
  47 #include "classfile/modules.hpp"
  48 #include "classfile/stringTable.hpp"
  49 #include "classfile/symbolTable.hpp"
  50 #include "classfile/systemDictionary.hpp"
  51 #include "classfile/systemDictionaryShared.hpp"
  52 #include "classfile/vmClasses.hpp"
  53 #include "classfile/vmSymbols.hpp"
  54 #include "gc/shared/collectedHeap.hpp"
  55 #include "gc/shared/gcLocker.hpp"
  56 #include "gc/shared/gcVMOperations.hpp"
  57 #include "logging/log.hpp"
  58 #include "logging/logStream.hpp"
  59 #include "memory/iterator.inline.hpp"
  60 #include "memory/resourceArea.hpp"
  61 #include "memory/universe.hpp"
  62 #include "oops/compressedOops.inline.hpp"
  63 #include "oops/fieldStreams.inline.hpp"
  64 #include "oops/objArrayOop.inline.hpp"
  65 #include "oops/oop.inline.hpp"
  66 #include "oops/oopCast.inline.hpp"
  67 #include "oops/oopHandle.inline.hpp"
  68 #include "oops/typeArrayOop.inline.hpp"
  69 #include "prims/jvmtiExport.hpp"
  70 #include "runtime/arguments.hpp"
  71 #include "runtime/fieldDescriptor.inline.hpp"
  72 #include "runtime/globals_extension.hpp"
  73 #include "runtime/init.hpp"
  74 #include "runtime/javaCalls.hpp"
  75 #include "runtime/mutexLocker.hpp"
  76 #include "runtime/safepointVerifiers.hpp"
  77 #include "utilities/bitMap.inline.hpp"
  78 #include "utilities/copy.hpp"
  79 #if INCLUDE_G1GC
  80 #include "gc/g1/g1CollectedHeap.hpp"
  81 #endif
  82 
  83 #if INCLUDE_CDS_JAVA_HEAP
  84 
  85 struct ArchivableStaticFieldInfo {
  86   const char* klass_name;

 436       AOTStreamedHeapLoader::finish_initialization(static_mapinfo);
 437     } else {
 438       // Finish up archived heap initialization. These must be
 439       // done after ReadClosure.
 440       AOTMappedHeapLoader::finish_initialization(static_mapinfo);
 441     }
 442   }
 443 }
 444 
  445 void HeapShared::make_archived_object_cache_gc_safe() {
        // Rebuild _archived_object_cache under the identity-hash scheme (see
        // archived_object_cache_hash()) so lookups stay valid after objects
        // move; runs exactly once. Value objects are filtered out (see below).
  446   ArchivedObjectCache* new_cache = new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
  447
  448   // It's safe to change the behavior of the hash function now, because iterate_all()
  449   // doesn't call the hash function.
  450   //  See archived_object_cache_hash() for more details.
  451   assert(_use_identity_hash_for_archived_object_cache == false, "happens only once");
  452   _use_identity_hash_for_archived_object_cache = true;
  453
  454   // Copy all CachedOopInfo into a new table using a different hashing algorithm
  455   archived_object_cache()->iterate_all([&] (OopHandle oh, CachedOopInfo info) {
  456       if (Arguments::is_valhalla_enabled() && oh.resolve()->klass()->is_inline_klass()) {
  457         // After make_archived_object_cache_gc_safe() returns,
  458         // _archived_object_cache->get() is called only from the (future) AOT code
  459         // compiler to access heap oops referenced by AOT-compiled method.
  460         //
  461         // As planned in JDK 27 (JDK-8335368), AOT-compiled methods will only reference
  462         // oops that are Strings, mirrors, or exceptions, all of which are not value
  463         // objects.
  464         //
  465         // We exclude value objects from new_cache, as we don't know how to track them
  466         // after the GC moves them. This should be fixed when AOT-compiled methods
  467         // need to reference value objects.
  468         //
  469         // Also TODO: the AOT heap should de-duplicate value objects with identical
  470         // values.
  471       } else {
  472         new_cache->put_when_absent(oh, info);
  473       }
  474   });
  475
  476   destroy_archived_object_cache();
  477   _archived_object_cache = new_cache;
  478 }
 479 
  480 HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
        // The cache is keyed by OopHandle, so wrap obj in a temporary global
        // handle just for the lookup, then release it to avoid leaking a slot.
  481   OopHandle oh(Universe::vm_global(), obj);
  482   CachedOopInfo* result = _archived_object_cache->get(oh);
  483   oh.release(Universe::vm_global());
  484   return result;
  485 }
 486 
  487 bool HeapShared::has_been_archived(oop obj) {
        // True iff obj already has an entry in the archived-object cache.
  488   assert(CDSConfig::is_dumping_heap(), "dump-time only");
  489   return get_cached_oop_info(obj) != nullptr;
  490 }
 491 
 492 int HeapShared::append_root(oop obj) {
 493   assert(SafepointSynchronize::is_at_safepoint(), "sanity");
 494   assert(CDSConfig::is_dumping_heap(), "dump-time only");

 695     MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
 696     OopHandle handle(Universe::vm_global(), o);
 697     put_when_absent(ptr, handle);
 698   }
 699   void remove_oop(MetaspaceObj* ptr) {
 700     MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
 701     OopHandle* handle = get(ptr);
 702     if (handle != nullptr) {
 703       handle->release(Universe::vm_global());
 704       remove(ptr);
 705     }
 706   }
 707 };
 708 
  709 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
        // Record the scratch copy of src's resolved_references array. Only
        // constant pools of builtin-loader classes are archived; others are
        // silently ignored.
  710   if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
  711     _scratch_objects_table->set_oop(src, dest);
  712   }
  713 }
 714 
  715 refArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
        // Returns the scratch copy registered via add_scratch_resolved_references(),
        // or nullptr if none was recorded. oop_cast<> performs the checked
        // cast to refArrayOop; null must be handled before casting.
  716   oop rr = _scratch_objects_table->get_oop(src);
  717   return rr == nullptr ? nullptr : oop_cast<refArrayOop>(rr);
  718 }
 719 
  720 void HeapShared::remove_scratch_resolved_references(ConstantPool* src) {
        // Drop the scratch entry for src (releases the underlying OopHandle;
        // see MetaspaceObjToOopHandleTable::remove_oop). No-op outside a
        // heap-dumping session.
  721   if (CDSConfig::is_dumping_heap()) {
  722     _scratch_objects_table->remove_oop(src);
  723   }
  724 }
 725 
  726 void HeapShared::init_dumping() {
        // Set up dump-time tables: the scratch-object map, the pending-roots
        // array (slot 0 reserved for null), and (debug only) the set of
        // classes with cached oops.
  727   _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  728   _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  729   _pending_roots->append(nullptr); // root index 0 represents a null oop
  730   DEBUG_ONLY(_dumptime_classes_with_cached_oops = new (mtClassShared)ArchivableKlassTable());
  731 }
 732 
 733 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
 734   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
 735     BasicType bt = (BasicType)i;
 736     if (!is_reference_type(bt)) {
 737       oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);

 894     assert(success, "sanity");
 895   }
 896 
 897   if (log_is_enabled(Debug, aot, init)) {
 898     ResourceMark rm;
 899     log_debug(aot, init)("copied %3d field(s) in aot-initialized mirror %s%s%s", nfields, ik->external_name(),
 900                          ik->is_hidden() ? " (hidden)" : "",
 901                          ik->is_enum_subclass() ? " (enum)" : "");
 902   }
 903 }
 904 
  905 void HeapShared::copy_java_mirror(oop orig_mirror, oop scratch_m) {
        // Copy dump-relevant state from the runtime mirror into its scratch
        // copy: identity hash (plus narrow-klass bits under compact headers),
        // Valhalla mirror fields (null_reset value, acmp maps), and
        // module/protection_domain when aot-linking classes.
  906   // We need to retain the identity_hash, because it may have been used by some hashtables
  907   // in the shared heap.
  908   if (!orig_mirror->fast_no_hash_check()) {
  909     intptr_t src_hash = orig_mirror->identity_hash();
  910     if (UseCompactObjectHeaders) {
            // With compact headers the klass is encoded in the mark word, so
            // it must be preserved alongside the hash.
  911       narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
  912       scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
  913     } else {
  914       // For valhalla, the prototype header is the same as markWord::prototype();
  915       scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
  916     }
  917     assert(scratch_m->mark().is_unlocked(), "sanity");
  918
  919     DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
  920     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  921   }
  922
  923   Klass* k = java_lang_Class::as_Klass(orig_mirror);
  924   if (k != nullptr && k->is_instance_klass()) {
  925     InstanceKlass* ik = InstanceKlass::cast(k);
  926
  927     if (ik->is_inline_klass() && ik->is_initialized()) {
  928       // Only concrete value classes need the null_reset field
  929       InlineKlass* ilk = InlineKlass::cast(k);
  930       if (ilk->supports_nullable_layouts()) {
  931         scratch_m->obj_field_put(ilk->null_reset_value_offset(), ilk->null_reset_value());
  932       }
  933     }
  934
        // Copy the acmp maps oop field verbatim from the original mirror.
        // NOTE(review): assumes the maps object is safe to share at dump
        // time — confirm against the acmp maps lifecycle.
  935     if (ik->has_acmp_maps_offset()) {
  936       int maps_offset = ik->acmp_maps_offset();
  937       oop maps = orig_mirror->obj_field(maps_offset);
  938       scratch_m->obj_field_put(maps_offset, maps);
  939     }
  940   }
  941
  942   if (CDSConfig::is_dumping_aot_linked_classes()) {
  943     java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
  944     java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
  945   }
  946 }
 947 
  948 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
        // Returns the scratch (to-be-archived) resolved_references array for
        // src_ik, or nullptr if the class is not builtin-loaded, has no
        // resolved references, or the array is too large to archive. The
        // runtime array 'rr' is only used for the existence/size checks.
        // NOTE(review): scratch_resolved_references() now returns refArrayOop
        // while this function returns objArrayOop — relies on an implicit
        // conversion; confirm this is intended.
  949   if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
  950     objArrayOop rr = src_ik->constants()->resolved_references_or_null();
  951     if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
  952       return HeapShared::scratch_resolved_references(src_ik->constants());
  953     }
  954   }
  955   return nullptr;
  956 }
 957 
 958 int HeapShared::archive_exception_instance(oop exception) {
 959   bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
 960   assert(success, "sanity");
 961   return append_root(exception);

1138     // the original Klass*
1139     if (orig_k == vmClasses::String_klass() ||
1140         orig_k == vmClasses::Object_klass()) {
1141       // Initialized early during VM initialization. No need to be added
1142       // to the sub-graph object class list.
1143       return;
1144     }
1145     check_allowed_klass(InstanceKlass::cast(orig_k));
1146   } else if (orig_k->is_objArray_klass()) {
1147     Klass* abk = ObjArrayKlass::cast(orig_k)->bottom_klass();
1148     if (abk->is_instance_klass()) {
1149       assert(InstanceKlass::cast(abk)->defined_by_boot_loader(),
1150             "must be boot class");
1151       check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
1152     }
1153     if (orig_k == Universe::objectArrayKlass()) {
1154       // Initialized early during Universe::genesis. No need to be added
1155       // to the list.
1156       return;
1157     }
1158     if (orig_k->is_flatArray_klass()) {
1159       _subgraph_object_klasses->append_if_missing(FlatArrayKlass::cast(orig_k)->element_klass());
1160     }
1161   } else {
1162     assert(orig_k->is_typeArray_klass(), "must be");
1163     // Primitive type arrays are created early during Universe::genesis.
1164     return;
1165   }
1166 
1167   if (log_is_enabled(Debug, aot, heap)) {
1168     if (!_subgraph_object_klasses->contains(orig_k)) {
1169       ResourceMark rm;
1170       log_debug(aot, heap)("Adding klass %s", orig_k->external_name());
1171     }
1172   }
1173 
1174   _subgraph_object_klasses->append_if_missing(orig_k);
1175 }
1176 
1177 void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
1178 #ifndef PRODUCT
1179   if (AOTClassInitializer::has_test_class()) {
1180     // The tests can cache arbitrary types of objects.

1516     _test_class = k;
1517     _test_class_record = record;
1518   }
1519 #endif
1520 
1521   // Initialize from archived data. Currently this is done only
1522   // during VM initialization time. No lock is needed.
1523   if (record == nullptr) {
1524     if (log_is_enabled(Info, aot, heap)) {
1525       ResourceMark rm(THREAD);
1526       log_info(aot, heap)("subgraph %s is not recorded",
1527                           k->external_name());
1528     }
1529     return nullptr;
1530   } else {
1531     if (log_is_enabled(Info, aot, heap)) {
1532       ResourceMark rm;
1533       log_info(aot, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
1534     }
1535 
1536     Array<Klass*>* klasses = record->subgraph_object_klasses();
1537 
1538     if (do_init && klasses != nullptr) {
1539       // All the classes of the oops in this subgraph are in the klasses array.
1540       // Link them first in case any of the oops are used in the <clinit> methods
1541       // invoked in the rest of this function.
1542       for (int i = 0; i < klasses->length(); i++) {
1543         Klass* klass = klasses->at(i);
1544         if (klass->in_aot_cache() && klass->is_instance_klass()) {
1545           InstanceKlass::cast(klass)->link_class(CHECK_NULL);
1546         }
1547       }
1548     }
1549 
1550     resolve_or_init(k, do_init, CHECK_NULL);
1551 
1552     // Load/link/initialize the klasses of the objects in the subgraph.
1553     // nullptr class loader is used.

1554     if (klasses != nullptr) {
1555       for (int i = 0; i < klasses->length(); i++) {
1556         Klass* klass = klasses->at(i);
1557         if (!klass->in_aot_cache()) {
1558           return nullptr;
1559         }
1560         resolve_or_init(klass, do_init, CHECK_NULL);
1561       }
1562     }
1563   }
1564 
1565   return record;
1566 }
1567 
 1568 void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
        // Name-based convenience wrapper: look up a builtin (boot-loader)
        // class and resolve it; optionally also run its initialization.
        // A class that is not found is silently skipped.
 1569   TempNewSymbol klass_name_sym =  SymbolTable::new_symbol(klass_name);
 1570   InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
 1571   if (k == nullptr) {
 1572     return;
 1573   }
 1574   assert(k->defined_by_boot_loader(), "sanity");
        // Always do the resolve pass first; the init pass requires it.
 1575   resolve_or_init(k, false, CHECK);
 1576   if (do_init) {
 1577     resolve_or_init(k, true, CHECK);
 1578   }
 1579 }
1580 
1581 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
1582   if (!do_init) {
1583     if (k->class_loader_data() == nullptr) {
1584       Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
1585       if (resolved_k->is_array_klass()) {
1586         assert(resolved_k == k || resolved_k == k->super(), "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
1587       } else {
1588         assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
1589       }
1590     }
1591   } else {
1592     assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
1593     if (k->is_instance_klass()) {
1594       InstanceKlass* ik = InstanceKlass::cast(k);
1595       ik->initialize(CHECK);
1596     } else if (k->is_objArray_klass()) {
1597       ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1598       oak->initialize(CHECK);
1599     }
1600   }
1601 }
1602 
1603 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1604   verify_the_heap(k, "before");
1605 
1606   Array<int>* entry_field_records = record->entry_field_records();
1607   if (entry_field_records != nullptr) {
1608     int efr_len = entry_field_records->length();
1609     assert(efr_len % 2 == 0, "sanity");

2036   }
2037 }
2038 #endif
2039 
 2040 void HeapShared::check_special_subgraph_classes() {
        // Dump-time sanity check: unless aot-linked classes are being dumped
        // (which may legitimately pull in arbitrary types), every identity
        // klass in the special subgraph must be one of the whitelisted types
        // below; anything else is a fatal error.
 2041   if (CDSConfig::is_dumping_aot_linked_classes()) {
 2042     // We can have aot-initialized classes (such as Enums) that can reference objects
 2043     // of arbitrary types. Currently, we trust the JEP 483 implementation to only
 2044     // aot-initialize classes that are "safe".
 2045     //
 2046     // TODO: we need an automatic tool that checks the safety of aot-initialized
 2047     // classes (when we extend the set of aot-initialized classes beyond JEP 483)
 2048     return;
 2049   } else {
 2050     // In this case, the special subgraph should contain a few specific types
 2051     GrowableArray<Klass*>* klasses = _dump_time_special_subgraph->subgraph_object_klasses();
 2052     int num = klasses->length();
 2053     for (int i = 0; i < num; i++) {
 2054       Klass* subgraph_k = klasses->at(i);
 2055       Symbol* name = subgraph_k->name();
 2056
            // Only identity classes are restricted to the whitelist; value
            // classes pass this check unconditionally.
 2057       if (subgraph_k->is_identity_class() &&
 2058           name != vmSymbols::java_lang_Class() &&
 2059           name != vmSymbols::java_lang_String() &&
 2060           name != vmSymbols::java_lang_ArithmeticException() &&
 2061           name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
 2062           name != vmSymbols::java_lang_ArrayStoreException() &&
 2063           name != vmSymbols::java_lang_ClassCastException() &&
 2064           name != vmSymbols::java_lang_InternalError() &&
 2065           name != vmSymbols::java_lang_NullPointerException() &&
 2066           name != vmSymbols::jdk_internal_vm_PreemptedException()) {
 2067         ResourceMark rm;
 2068         fatal("special subgraph cannot have objects of type %s", subgraph_k->external_name());
 2069       }
 2070     }
 2071   }
 2072 }
2073 
 // Out-of-class definitions of HeapShared static data members (dump-time
 // traversal state and statistics).
 2074 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
 2075 HeapShared::PendingOop HeapShared::_object_being_archived;
 2076 size_t HeapShared::_num_new_walked_objs;
 2077 size_t HeapShared::_num_new_archived_objs;
< prev index next >