6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotArtifactFinder.hpp"
26 #include "cds/aotClassInitializer.hpp"
27 #include "cds/aotClassLocation.hpp"
28 #include "cds/aotLogging.hpp"
29 #include "cds/aotMappedHeapLoader.hpp"
30 #include "cds/aotMappedHeapWriter.hpp"
31 #include "cds/aotMetaspace.hpp"
32 #include "cds/aotOopChecker.hpp"
33 #include "cds/aotReferenceObjSupport.hpp"
34 #include "cds/aotStreamedHeapLoader.hpp"
35 #include "cds/aotStreamedHeapWriter.hpp"
36 #include "cds/archiveBuilder.hpp"
37 #include "cds/archiveUtils.hpp"
38 #include "cds/cds_globals.hpp"
39 #include "cds/cdsConfig.hpp"
40 #include "cds/cdsEnumKlass.hpp"
41 #include "cds/cdsHeapVerifier.hpp"
42 #include "cds/heapShared.inline.hpp"
43 #include "cds/regeneratedClasses.hpp"
44 #include "classfile/classLoaderData.hpp"
45 #include "classfile/javaClasses.inline.hpp"
46 #include "classfile/modules.hpp"
47 #include "classfile/stringTable.hpp"
78 #include "gc/g1/g1CollectedHeap.hpp"
79 #endif
80
81 #if INCLUDE_CDS_JAVA_HEAP
82
83 struct ArchivableStaticFieldInfo {
84 const char* klass_name;
85 const char* field_name;
86 InstanceKlass* klass;
87 int offset;
88 BasicType type;
89
90 ArchivableStaticFieldInfo(const char* k, const char* f)
91 : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}
92
93 bool valid() {
94 return klass_name != nullptr;
95 }
96 };
97
98 // Anything that goes in the header must be thoroughly purged from uninitialized memory
99 // as it will be written to disk. Therefore, the constructors memset the memory to 0.
100 // This is not the prettiest thing, but we need to know every byte is initialized,
101 // including potential padding between fields.
102
// Constructs a fully-initialized header. The memset must come first: the header
// is written to disk, so every byte (including padding) must be deterministic.
ArchiveMappedHeapHeader::ArchiveMappedHeapHeader(size_t ptrmap_start_pos,
                                                 size_t oopmap_start_pos,
                                                 HeapRootSegments root_segments) {
  memset((char*)this, 0, sizeof(*this));
  _ptrmap_start_pos = ptrmap_start_pos;
  _oopmap_start_pos = oopmap_start_pos;
  _root_segments = root_segments;
}
111
// Default constructor: zero the whole header (including padding) since it may
// be written to disk verbatim.
ArchiveMappedHeapHeader::ArchiveMappedHeapHeader() {
  memset((char*)this, 0, sizeof(*this));
}
115
116 ArchiveMappedHeapHeader ArchiveMappedHeapInfo::create_header() {
117 return ArchiveMappedHeapHeader{_ptrmap_start_pos,
161 #endif
162
163
164 //
165 // If you add new entries to the following tables, you should know what you're doing!
166 //
167
// Static fields in boot-loader classes whose object subgraphs are archived
// unconditionally. The table is terminated by a {nullptr, nullptr} sentinel.
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  {"java/lang/Long$LongCache", "archivedCache"},
  {"java/lang/Byte$ByteCache", "archivedCache"},
  {"java/lang/Short$ShortCache", "archivedCache"},
  {"java/lang/Character$CharacterCache", "archivedCache"},
  {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale", "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
  {"java/util/ImmutableCollections", "archivedObjects"},
  {"java/lang/ModuleLayer", "EMPTY_LAYER"},
  {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger", "archivedCaches"},

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};
187
// Additional subgraph roots that are archived only when the full module graph
// is being dumped. Terminated by a {nullptr, nullptr} sentinel.
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData", "archivedData"},
  {nullptr, nullptr},
};
195
// Dump-time subgraph for objects not associated with any particular class.
KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
// Run-time counterpart of the special subgraph, read back from the archive.
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
// Root objects collected at dump time; append_root() returns stable indices
// into this array.
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
// Scratch mirrors for the primitive types, indexed by BasicType.
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
// Maps metaspace objects (e.g., ConstantPool) to their scratch heap objects.
MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
201
202 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
203 for (int i = 0; fields[i].valid(); i++) {
204 if (fields[i].klass == ik) {
205 return true;
206 }
207 }
208 return false;
209 }
210
211 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
212 return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
213 is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
214 }
215
// Resolves the OopHandle holding the original referrer of this cached object.
oop HeapShared::CachedOopInfo::orig_referrer() const {
  return _orig_referrer.resolve();
}
// Looks up obj in the archived-object cache. A temporary global OopHandle is
// created only to serve as the lookup key; it must be released before return
// to avoid leaking a vm_global handle.
HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
  OopHandle oh(Universe::vm_global(), obj);
  CachedOopInfo* result = _archived_object_cache->get(oh);
  oh.release(Universe::vm_global());
  return result;
}
431
432 bool HeapShared::has_been_archived(oop obj) {
433 assert(CDSConfig::is_dumping_heap(), "dump-time only");
434 return get_cached_oop_info(obj) != nullptr;
435 }
436
// Appends an already-archived object to the root list and returns its stable
// index, which is later stored in archived subgraph records.
int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (obj != nullptr) {
    assert(has_been_archived(obj), "must be");
  }
  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  return _pending_roots->append(obj);
}
447
448 oop HeapShared::get_root(int index, bool clear) {
449 assert(index >= 0, "sanity");
450 assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
451 assert(is_archived_heap_in_use(), "getting roots into heap that is not used");
452
453 oop result;
454 if (HeapShared::is_loading_streaming_mode()) {
455 result = AOTStreamedHeapLoader::get_root(index);
456 } else {
457 assert(HeapShared::is_loading_mapping_mode(), "must be");
458 result = AOTMappedHeapLoader::get_root(index);
459 }
460
461 if (clear) {
462 clear_root(index);
463 }
464
465 return result;
466 }
467
  // Removes the scratch-object mapping for ptr, if any. The OopHandle must be
  // released before the table entry is removed so the vm_global handle is not
  // leaked. Guarded by ScratchObjects_lock (no safepoint check).
  void remove_oop(MetaspaceObj* ptr) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle* handle = get(ptr);
    if (handle != nullptr) {
      handle->release(Universe::vm_global());
      remove(ptr);
    }
  }
606 };
607
// Records the scratch resolved-references array for a constant pool. Only
// constant pools of classes loaded by the builtin loaders are recorded.
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
    _scratch_objects_table->set_oop(src, dest);
  }
}
613
614 objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
615 return (objArrayOop)_scratch_objects_table->get_oop(src);
616 }
617
// One-time setup of the dump-time data structures: the scratch-object table
// and the pending-roots array.
void HeapShared::init_dumping() {
  _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
}
622
// Creates a scratch mirror for each primitive (non-reference) basic type and
// retains it via a vm_global OopHandle. May throw (TRAPS) if mirror creation
// fails.
void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
    }
  }
}
632
633 // Given java_mirror that represents a (primitive or reference) type T,
634 // return the "scratch" version that represents the same type T.
// Note that java_mirror itself will be returned if it's already a
// scratch mirror.
637 //
638 // See java_lang_Class::create_scratch_mirror() for more info.
639 oop HeapShared::scratch_java_mirror(oop java_mirror) {
640 assert(java_lang_Class::is_instance(java_mirror), "must be");
718 if (RegeneratedClasses::is_regenerated_object(ik)) {
719 InstanceKlass* orig_ik = RegeneratedClasses::get_original_object(ik);
720 precond(orig_ik->is_initialized());
721 orig_mirror = orig_ik->java_mirror();
722 } else {
723 precond(ik->is_initialized());
724 orig_mirror = ik->java_mirror();
725 }
726
727 oop m = scratch_java_mirror(ik);
728 int nfields = 0;
729 for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
730 if (fs.access_flags().is_static()) {
731 fieldDescriptor& fd = fs.field_descriptor();
732 int offset = fd.offset();
733 switch (fd.field_type()) {
734 case T_OBJECT:
735 case T_ARRAY:
736 {
737 oop field_obj = orig_mirror->obj_field(offset);
738 if (offset == java_lang_Class::reflection_data_offset()) {
739 // Class::reflectData use SoftReference, which cannot be archived. Set it
740 // to null and it will be recreated at runtime.
741 field_obj = nullptr;
742 }
743 m->obj_field_put(offset, field_obj);
744 if (field_obj != nullptr) {
745 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, field_obj);
746 assert(success, "sanity");
747 }
748 }
749 break;
750 case T_BOOLEAN:
751 m->bool_field_put(offset, orig_mirror->bool_field(offset));
752 break;
753 case T_BYTE:
754 m->byte_field_put(offset, orig_mirror->byte_field(offset));
755 break;
756 case T_SHORT:
757 m->short_field_put(offset, orig_mirror->short_field(offset));
758 break;
759 case T_CHAR:
760 m->char_field_put(offset, orig_mirror->char_field(offset));
761 break;
762 case T_INT:
797 // We need to retain the identity_hash, because it may have been used by some hashtables
798 // in the shared heap.
799 if (!orig_mirror->fast_no_hash_check()) {
800 intptr_t src_hash = orig_mirror->identity_hash();
801 if (UseCompactObjectHeaders) {
802 narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
803 scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
804 } else {
805 scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
806 }
807 assert(scratch_m->mark().is_unlocked(), "sanity");
808
809 DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
810 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
811 }
812
813 if (CDSConfig::is_dumping_aot_linked_classes()) {
814 java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
815 java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
816 }
817 }
818
// Returns the scratch copy of src_ik's resolved-references array, or nullptr
// if the class is not loaded by a builtin loader, has no resolved references,
// or the array is too large to archive.
static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
  if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
    objArrayOop rr = src_ik->constants()->resolved_references_or_null();
    if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
      return HeapShared::scratch_resolved_references(src_ik->constants());
    }
  }
  return nullptr;
}
828
// Archives the shared-strings array (mapped heap format only): builds the
// array, archives everything reachable from it, and records its root index
// with the StringTable.
void HeapShared::archive_strings() {
  assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
  oop shared_strings_array = StringTable::init_shared_strings_array();
  bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
  assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
  StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
}
836
847 has_oop_pointers = info->has_oop_pointers();
848 has_native_pointers = info->has_native_pointers();
849 }
850
// Marks the cached info for src_obj as containing native pointers. The
// OopHandle here wraps the stack slot holding src_obj and serves only as the
// cache lookup key; it does not own a global handle.
void HeapShared::set_has_native_pointers(oop src_obj) {
  OopHandle oh(&src_obj);
  CachedOopInfo* info = archived_object_cache()->get(oh);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}
857
// Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
// should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
void HeapShared::start_scanning_for_oops() {
  {
    // No safepoint may occur while the subgraph info and object cache are set up.
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (HeapShared::is_writing_mapping_mode() && (UseG1GC || UseCompressedOops)) {
      // Log the source heap range: the compressed-oops range when enabled,
      // otherwise the G1 reserved range.
      aot_log_info(aot)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                        UseCompressedOops ? p2i(CompressedOops::begin()) :
                                            p2i((address)G1CollectedHeap::heap()->reserved().start()),
                        UseCompressedOops ? p2i(CompressedOops::end()) :
                                            p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }

    archive_subgraphs();
  }

  init_seen_objects_table();
  Universe::archive_exception_instances();
}
885
// Finishes the oop-scanning phase: archives interned strings (mapped heap
// format only) and tears down the seen-objects table created by
// start_scanning_for_oops().
void HeapShared::end_scanning_for_oops() {
  if (is_writing_mapping_mode()) {
    archive_strings();
  }
  delete_seen_objects_table();
}
892
// Verifies the discovered objects, then writes the heap contents in either the
// mapped or the streamed format, followed by the subgraph info table.
void HeapShared::write_heap(ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) {
  {
    // Verification must run without safepoints so the heap stays stable.
    NoSafepointVerifier nsv;
    CDSHeapVerifier::verify();
    check_special_subgraph_classes();
  }

  if (HeapShared::is_writing_mapping_mode()) {
    StringTable::write_shared_table();
    AOTMappedHeapWriter::write(_pending_roots, mapped_heap_info);
  } else {
    assert(HeapShared::is_writing_streaming_mode(), "are there more modes?");
    AOTStreamedHeapWriter::write(_pending_roots, streamed_heap_info);
  }

  // OtherROAllocMark scopes the allocations made while writing the table.
  ArchiveBuilder::OtherROAllocMark mark;
  write_subgraph_info_table();
}
911
// Copies the original mirror's contents into its scratch mirror and archives
// everything reachable from the scratch mirror.
void HeapShared::scan_java_mirror(oop orig_mirror) {
  oop m = scratch_java_mirror(orig_mirror);
  if (m != nullptr) { // nullptr if for custom class loader
    copy_java_mirror(orig_mirror, m);
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
    assert(success, "sanity");
  }
}
920
// Scans the heap objects owned by a class: its mirror and, for instance
// klasses, the (archivable subset of its) resolved-references array.
void HeapShared::scan_java_class(Klass* orig_k) {
  scan_java_mirror(orig_k->java_mirror());

  if (orig_k->is_instance_klass()) {
    InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
    orig_ik->constants()->prepare_resolved_references_for_archiving();
    objArrayOop rr = get_archived_resolved_references(orig_ik);
    if (rr != nullptr) {
      bool success = HeapShared::archive_reachable_objects_from(1, _dump_time_special_subgraph, rr);
      assert(success, "must be");
    }
  }
}
934
935 void HeapShared::archive_subgraphs() {
936 assert(CDSConfig::is_dumping_heap(), "must be");
937
962 &created);
963 assert(created, "must not initialize twice");
964 return info;
965 }
966
// Returns the dump-time subgraph info for k; the entry must already have been
// created by init_subgraph_info().
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
  assert(info != nullptr, "must have been initialized");
  return info;
}
973
// Add an entry field to the current KlassSubGraphInfo.
// Entries are stored as (field offset, root index) pairs; the root index is
// obtained by appending v to HeapShared's pending roots.
void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  if (_subgraph_entry_fields == nullptr) {
    _subgraph_entry_fields =
      new (mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
}
984
985 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
986 // Only objects of boot classes can be included in sub-graph.
987 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
988 assert(CDSConfig::is_dumping_heap(), "dump time only");
989
990 if (_subgraph_object_klasses == nullptr) {
991 _subgraph_object_klasses =
992 new (mtClass) GrowableArray<Klass*>(50, mtClass);
993 }
994
995 if (_k == orig_k) {
996 // Don't add the Klass containing the sub-graph to it's own klass
997 // initialization list.
998 return;
999 }
1000
1001 if (orig_k->is_instance_klass()) {
1002 #ifdef ASSERT
1279 which, k->external_name());
1280 FlagSetting fs1(VerifyBeforeGC, true);
1281 FlagSetting fs2(VerifyDuringGC, true);
1282 FlagSetting fs3(VerifyAfterGC, true);
1283 Universe::heap()->collect(GCCause::_java_lang_system_gc);
1284 }
1285 }
1286 }
1287
// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!is_archived_heap_in_use()) {
    return; // nothing to do
  }
  // Resolve the classes for both root tables (unconditional and full-module-graph).
  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
1302
// Resolves the classes of each subgraph rooted at an entry of the given
// (sentinel-terminated) table. All root classes are expected to be archived
// boot-loader classes.
void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}
1312
// Resolves (without initializing) the classes recorded in k's archived
// subgraph. Any pending exception is swallowed; if the record cannot be used,
// the subgraph's archived roots are cleared instead.
void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == nullptr) {
    clear_archived_roots_of(k);
  }
}
1325
1326 void HeapShared::initialize_java_lang_invoke(TRAPS) {
1327 if (CDSConfig::is_using_aot_linked_classes() || CDSConfig::is_dumping_method_handles()) {
1328 resolve_or_init("java/lang/invoke/Invokers$Holder", true, CHECK);
1329 resolve_or_init("java/lang/invoke/MethodHandle", true, CHECK);
1330 resolve_or_init("java/lang/invoke/MethodHandleNatives", true, CHECK);
1331 resolve_or_init("java/lang/invoke/DirectMethodHandle$Holder", true, CHECK);
1332 resolve_or_init("java/lang/invoke/DelegatingMethodHandle$Holder", true, CHECK);
1333 resolve_or_init("java/lang/invoke/LambdaForm$Holder", true, CHECK);
1334 resolve_or_init("java/lang/invoke/BoundMethodHandle$Species_L", true, CHECK);
1335 }
1336 }
1337
1338 // Initialize the InstanceKlasses of objects that are reachable from the following roots:
1339 // - interned strings
1340 // - Klass::java_mirror() -- including aot-initialized mirrors such as those of Enum klasses.
1341 // - ConstantPool::resolved_references()
1342 // - Universe::<xxx>_exception_instance()
1343 //
1344 // For example, if this enum class is initialized at AOT cache assembly time:
1345 //
1346 // enum Fruit {
1347 // APPLE, ORANGE, BANANA;
1348 // static final Set<Fruit> HAVE_SEEDS = new HashSet<>(Arrays.asList(APPLE, ORANGE));
1349 // }
1350 //
1351 // the aot-initialized mirror of Fruit has a static field that references HashSet, which
1352 // should be initialized before any Java code can access the Fruit class. Note that
1353 // HashSet itself doesn't necessary need to be an aot-initialized class.
1354 void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
1355 if (!is_archived_heap_in_use()) {
1512 ik->initialize(CHECK);
1513 } else if (k->is_objArray_klass()) {
1514 ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1515 oak->initialize(CHECK);
1516 }
1517 }
1518 }
1519
1520 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1521 verify_the_heap(k, "before");
1522
1523 Array<int>* entry_field_records = record->entry_field_records();
1524 if (entry_field_records != nullptr) {
1525 int efr_len = entry_field_records->length();
1526 assert(efr_len % 2 == 0, "sanity");
1527 for (int i = 0; i < efr_len; i += 2) {
1528 int field_offset = entry_field_records->at(i);
1529 int root_index = entry_field_records->at(i+1);
1530 // Load the subgraph entry fields from the record and store them back to
1531 // the corresponding fields within the mirror.
1532 oop v = get_root(root_index, /*clear=*/true);
1533 oop m = k->java_mirror();
1534 if (k->has_aot_initialized_mirror()) {
1535 assert(v == m->obj_field(field_offset), "must be aot-initialized");
1536 } else {
1537 m->obj_field_put(field_offset, v);
1538 }
1539 log_debug(aot, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
1540 }
1541
1542 // Done. Java code can see the archived sub-graphs referenced from k's
1543 // mirror after this point.
1544 if (log_is_enabled(Info, aot, heap)) {
1545 ResourceMark rm;
1546 log_info(aot, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s%s",
1547 k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "",
1548 k->has_aot_initialized_mirror() ? " (aot-inited)" : "");
1549 }
1550 }
1551
1552 verify_the_heap(k, "after ");
// Builds a CachedOopInfo for obj: retains its referrer via a vm_global
// OopHandle and records the result of iterating obj's oop fields with
// PointsToOopsChecker.
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(OopHandle(Universe::vm_global(), referrer), points_to_oops_checker.result());
}
1659
1660 void HeapShared::init_box_classes(TRAPS) {
1661 if (is_archived_heap_in_use()) {
1662 vmClasses::Boolean_klass()->initialize(CHECK);
1663 vmClasses::Character_klass()->initialize(CHECK);
1664 vmClasses::Float_klass()->initialize(CHECK);
1665 vmClasses::Double_klass()->initialize(CHECK);
1666 vmClasses::Byte_klass()->initialize(CHECK);
1667 vmClasses::Short_klass()->initialize(CHECK);
1668 vmClasses::Integer_klass()->initialize(CHECK);
1669 vmClasses::Long_klass()->initialize(CHECK);
1670 vmClasses::Void_klass()->initialize(CHECK);
1671 }
1672 }
1673
1674 // (1) If orig_obj has not been archived yet, archive it.
1675 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
1676 // trace all objects that are reachable from it, and make sure these objects are archived.
1677 // (3) Record the klasses of all objects that are reachable from orig_obj (including those that
1678 // were already archived when this function is called)
1679 bool HeapShared::archive_reachable_objects_from(int level,
1680 KlassSubGraphInfo* subgraph_info,
1681 oop orig_obj) {
1682 assert(orig_obj != nullptr, "must be");
1683 PendingOopStack stack;
1684 stack.push(PendingOop(orig_obj, nullptr, level));
1685
1686 while (stack.length() > 0) {
1687 PendingOop po = stack.pop();
1688 _object_being_archived = po;
1689 bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
1690 _object_being_archived = PendingOop();
1691
1692 if (!status) {
1693 // Don't archive a subgraph root that's too big. For archives static fields, that's OK
1920 verify_subgraph_from(f);
1921 }
1922 }
1923
// Verifies that every object reachable from orig_obj has been archived.
// Uses a fresh seen-objects table to avoid revisiting shared sub-objects.
void HeapShared::verify_subgraph_from(oop orig_obj) {
  if (!has_been_archived(orig_obj)) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj);
  delete_seen_objects_table();
}
1936
// Recursive helper for verify_subgraph_from(): checks that obj (or, for a
// mirror, its scratch copy) was archived, then walks its oop fields via
// VerifySharedOopClosure. Each object is visited at most once.
void HeapShared::verify_reachable_objects_from(oop obj) {
  _num_total_verifications ++;
  if (java_lang_Class::is_instance(obj)) {
    // Mirrors are archived as their scratch copies; verify those instead.
    obj = scratch_java_mirror(obj);
    assert(obj != nullptr, "must be");
  }
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);
    assert(has_been_archived(obj), "must be");
    VerifySharedOopClosure walker;
    obj->oop_iterate(&walker);
  }
}
1950 #endif
1951
1952 void HeapShared::check_special_subgraph_classes() {
1953 if (CDSConfig::is_initing_classes_at_dump_time()) {
1954 // We can have aot-initialized classes (such as Enums) that can reference objects
1955 // of arbitrary types. Currently, we trust the JEP 483 implementation to only
1956 // aot-initialize classes that are "safe".
1957 //
1958 // TODO: we need an automatic tool that checks the safety of aot-initialized
1959 // classes (when we extend the set of aot-initialized classes beyond JEP 483)
2240
// Archives the object subgraph rooted at each entry of fields[], recording
// per-subgraph class lists, and reports statistics at the end.
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the class of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  // Note: i is advanced by the inner loop, not here.
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(aot, heap)("Archived subgraph records = %zu",
                      _num_total_subgraph_recordings);
  log_info(aot, heap)("  Walked %zu objects", _num_total_walked_objs);
  log_info(aot, heap)("  Archived %zu objects", _num_total_archived_objs);
  log_info(aot, heap)("  Recorded %zu klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  // Debug builds re-verify every subgraph after recording.
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(aot, heap)("  Verified %zu references", _num_total_verifications);
#endif
}
|
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotArtifactFinder.hpp"
26 #include "cds/aotCacheAccess.hpp"
27 #include "cds/aotClassInitializer.hpp"
28 #include "cds/aotClassLocation.hpp"
29 #include "cds/aotConstantPoolResolver.hpp"
30 #include "cds/aotLogging.hpp"
31 #include "cds/aotMappedHeapLoader.hpp"
32 #include "cds/aotMappedHeapWriter.hpp"
33 #include "cds/aotMetaspace.hpp"
34 #include "cds/aotOopChecker.hpp"
35 #include "cds/aotReferenceObjSupport.hpp"
36 #include "cds/aotStreamedHeapLoader.hpp"
37 #include "cds/aotStreamedHeapWriter.hpp"
38 #include "cds/archiveBuilder.hpp"
39 #include "cds/archiveUtils.hpp"
40 #include "cds/cds_globals.hpp"
41 #include "cds/cdsConfig.hpp"
42 #include "cds/cdsEnumKlass.hpp"
43 #include "cds/cdsHeapVerifier.hpp"
44 #include "cds/heapShared.inline.hpp"
45 #include "cds/regeneratedClasses.hpp"
46 #include "classfile/classLoaderData.hpp"
47 #include "classfile/javaClasses.inline.hpp"
48 #include "classfile/modules.hpp"
49 #include "classfile/stringTable.hpp"
80 #include "gc/g1/g1CollectedHeap.hpp"
81 #endif
82
83 #if INCLUDE_CDS_JAVA_HEAP
84
85 struct ArchivableStaticFieldInfo {
86 const char* klass_name;
87 const char* field_name;
88 InstanceKlass* klass;
89 int offset;
90 BasicType type;
91
92 ArchivableStaticFieldInfo(const char* k, const char* f)
93 : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}
94
95 bool valid() {
96 return klass_name != nullptr;
97 }
98 };
99
100 class HeapShared::ContextMark : public StackObj {
101 ResourceMark rm;
102 public:
103 ContextMark(const char* c) : rm{} {
104 _context->push(c);
105 }
106 ~ContextMark() {
107 _context->pop();
108 }
109 };
110
111 // Anything that goes in the header must be thoroughly purged from uninitialized memory
112 // as it will be written to disk. Therefore, the constructors memset the memory to 0.
113 // This is not the prettiest thing, but we need to know every byte is initialized,
114 // including potential padding between fields.
115
// Constructs a fully-initialized header. The memset must come first: the header
// is written to disk, so every byte (including padding) must be deterministic.
ArchiveMappedHeapHeader::ArchiveMappedHeapHeader(size_t ptrmap_start_pos,
                                                 size_t oopmap_start_pos,
                                                 HeapRootSegments root_segments) {
  memset((char*)this, 0, sizeof(*this));
  _ptrmap_start_pos = ptrmap_start_pos;
  _oopmap_start_pos = oopmap_start_pos;
  _root_segments = root_segments;
}
124
// Default constructor: zero the whole header (including padding) since it may
// be written to disk verbatim.
ArchiveMappedHeapHeader::ArchiveMappedHeapHeader() {
  memset((char*)this, 0, sizeof(*this));
}
128
129 ArchiveMappedHeapHeader ArchiveMappedHeapInfo::create_header() {
130 return ArchiveMappedHeapHeader{_ptrmap_start_pos,
174 #endif
175
176
177 //
178 // If you add new entries to the following tables, you should know what you're doing!
179 //
180
// Static fields in boot-loader classes whose object subgraphs are archived
// unconditionally. The table is terminated by a {nullptr, nullptr} sentinel.
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  {"java/lang/Long$LongCache", "archivedCache"},
  {"java/lang/Byte$ByteCache", "archivedCache"},
  {"java/lang/Short$ShortCache", "archivedCache"},
  {"java/lang/Character$CharacterCache", "archivedCache"},
  {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale", "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
  {"java/util/ImmutableCollections", "archivedObjects"},
  {"java/lang/ModuleLayer", "EMPTY_LAYER"},
  {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger", "archivedCaches"},
  {"java/lang/reflect/Proxy$ProxyBuilder", "archivedData"}, // FIXME -- requires AOTClassLinking

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};
201
// Additional subgraph roots that are archived only when the full module graph
// is being dumped. Terminated by a {nullptr, nullptr} sentinel.
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData", "archivedData"},
  {nullptr, nullptr},
};
209
// Dump-time subgraph holding objects that belong to no particular class
// (mirrors, resolved references, etc.); see start_scanning_for_oops().
KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
// Runtime counterpart of the special subgraph, read in resolve_classes().
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
// Roots collected via append_root(); resolved and written out by write_heap().
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
// Human-readable context strings reported by exit_on_error().
GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
// Scratch mirrors for primitive types, indexed by BasicType.
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
216
217 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
218 for (int i = 0; fields[i].valid(); i++) {
219 if (fields[i].klass == ik) {
220 return true;
221 }
222 }
223 return false;
224 }
225
226 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
227 return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
228 is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
229 }
230
// Resolves the handle-protected original referrer recorded for this cached oop.
oop HeapShared::CachedOopInfo::orig_referrer() const {
  return _orig_referrer.resolve();
}
// Looks up the CachedOopInfo recorded for obj; returns nullptr if obj has not
// been archived. A temporary global OopHandle is created only because the
// cache is keyed by OopHandle; it is released before returning.
HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
  OopHandle oh(Universe::vm_global(), obj);
  CachedOopInfo* result = _archived_object_cache->get(oh);
  oh.release(Universe::vm_global());
  return result;
}
446
447 bool HeapShared::has_been_archived(oop obj) {
448 assert(CDSConfig::is_dumping_heap(), "dump-time only");
449 return get_cached_oop_info(obj) != nullptr;
450 }
451
// Appends obj (which must already be archived, or be null) to the list of
// pending archive-heap roots and returns its root index.
int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (obj != nullptr) {
    assert(has_been_archived(obj), "must be");
  }
  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  // Keep the root alive across future GCs by storing it in a global handle.
  OopHandle oh(Universe::vm_global(), obj);
  return _pending_roots->append(oh);
}
463
464 // Permanent oops are used to support AOT-compiled methods, which may have in-line references
465 // to Strings and MH oops.
466 //
467 // At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
// and are accessed via AOTCacheAccess::get_archived_object(int).
// Dump-time bookkeeping for one archived heap object that may be assigned a
// permanent index (see get_archived_object_permanent_index()).
struct PermanentOopInfo {
  int _index; // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
  int _heap_offset; // Offset of the object from the bottom of the archived heap.
  PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
};
474
// Hash table mapping handle-protected archived oops to their PermanentOopInfo.
class PermanentOopTable: public HashTable<OopHandle, PermanentOopInfo,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    HeapShared::oop_handle_hash,
    HeapShared::oop_handle_equals> {};
481
// Number of permanent indexes handed out at dump time.
static int _dumptime_permanent_oop_count = 0;
// Lazily created in add_to_permanent_oop_table(); maps oops to PermanentOopInfo.
static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
// Runtime list of permanent oops; slot i holds the oop with permanent index i.
static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;
485
// ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
// so we can remember their offset (from the bottom of the archived heap).
void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
  assert_at_safepoint();
  if (_dumptime_permanent_oop_table == nullptr) {
    _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
  }

  // _index starts at -1 ("no permanent index yet"); it is assigned lazily in
  // get_archived_object_permanent_index().
  PermanentOopInfo info(-1, offset);
  OopHandle oh(Universe::vm_global(), obj);
  _dumptime_permanent_oop_table->put_when_absent(oh, info);
}
498
// A permanent index is assigned to an archived object ONLY when
// the AOT compiler calls this function.
// Returns the object's permanent index, assigning one lazily on first request.
// Returns -1 when no heap is being dumped or the object is not in the table.
int HeapShared::get_archived_object_permanent_index(oop obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);

  if (!CDSConfig::is_dumping_heap()) {
    return -1; // Called by the Leyden old workflow
  }
  if (_dumptime_permanent_oop_table == nullptr) {
    return -1;
  }

  // Mirrors are archived via their scratch copies, so translate first.
  if (java_lang_Class::is_instance(obj)) {
    obj = scratch_java_mirror(obj);
  }

  // Stack-local OopHandle used purely as a lookup key; it does not own a
  // global handle, so no release is needed.
  OopHandle tmp(&obj);
  PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
  if (info == nullptr) {
    return -1;
  } else {
    if (info->_index < 0) {
      info->_index = _dumptime_permanent_oop_count++;
    }
    return info->_index;
  }
}
526
527 oop HeapShared::get_archived_object(int permanent_index) {
528 assert(permanent_index >= 0, "sanity");
529 assert(AOTMappedHeapLoader::is_in_use(), "sanity");
530 assert(_runtime_permanent_oops != nullptr, "sanity");
531
532 return _runtime_permanent_oops->at(permanent_index).resolve();
533 }
534
535 // Remember all archived heap objects that have a permanent index.
536 // table[i] = offset of oop whose permanent index is i.
537 void CachedCodeDirectoryInternal::dumptime_init_internal() {
538 const int count = _dumptime_permanent_oop_count;
539 if (count == 0) {
540 // Avoid confusing CDS code with zero-sized tables, just return.
541 log_info(cds)("No permanent oops");
542 _permanent_oop_count = count;
543 _permanent_oop_offsets = nullptr;
544 return;
545 }
546
547 int* table = (int*)AOTCacheAccess::allocate_aot_code_region(count * sizeof(int));
548 for (int i = 0; i < count; i++) {
549 table[count] = -1;
550 }
551 _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
552 int index = info._index;
553 if (index >= 0) {
554 assert(index < count, "sanity");
555 table[index] = info._heap_offset;
556 }
557 return true; // continue
558 });
559
560 for (int i = 0; i < count; i++) {
561 assert(table[i] >= 0, "must be");
562 }
563
564 log_info(cds)("Dumped %d permanent oops", count);
565
566 _permanent_oop_count = count;
567 AOTCacheAccess::set_pointer(&_permanent_oop_offsets, table);
568 }
569
570 // This is called during the bootstrap of the production run, before any GC can happen.
571 // Record each permanent oop in a OopHandle for GC safety.
572 void CachedCodeDirectoryInternal::runtime_init_internal() {
573 int count = _permanent_oop_count;
574 int* table = _permanent_oop_offsets;
575 _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
576 for (int i = 0; i < count; i++) {
577 oop obj = HeapShared::is_loading_streaming_mode() ?
578 nullptr : /* FIXME not implemented */
579 AOTMappedHeapLoader::oop_from_offset(table[i]);
580 OopHandle oh(Universe::vm_global(), obj);
581 _runtime_permanent_oops->append(oh);
582 }
583 };
584
585 oop HeapShared::get_root(int index, bool clear) {
586 assert(index >= 0, "sanity");
587 assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
588 assert(is_archived_heap_in_use(), "getting roots into heap that is not used");
589
590 oop result;
591 if (HeapShared::is_loading_streaming_mode()) {
592 result = AOTStreamedHeapLoader::get_root(index);
593 } else {
594 assert(HeapShared::is_loading_mapping_mode(), "must be");
595 result = AOTMappedHeapLoader::get_root(index);
596 }
597
598 if (clear) {
599 clear_root(index);
600 }
601
602 return result;
603 }
604
  // Releases the global OopHandle recorded for ptr (if any) and removes the
  // table entry, under the ScratchObjects_lock.
  void remove_oop(MetaspaceObj* ptr) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle* handle = get(ptr);
    if (handle != nullptr) {
      handle->release(Universe::vm_global());
      remove(ptr);
    }
  }
743 };
744
// Records dest as the scratch resolved_references array for src. Only constant
// pools of classes loaded by builtin loaders are recorded.
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
    _scratch_objects_table->set_oop(src, dest);
  }
}
750
// Returns the scratch resolved_references array recorded for src, or null.
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_objects_table->get_oop(src);
}
754
// One-time setup of the dump-time tables used by heap archiving.
void HeapShared::init_dumping() {
  _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
}
759
760 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
761 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
762 BasicType bt = (BasicType)i;
763 if (!is_reference_type(bt)) {
764 oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
765 _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
766 }
767 }
768 }
769
770 // Given java_mirror that represents a (primitive or reference) type T,
771 // return the "scratch" version that represents the same type T.
// Note that java_mirror itself is returned if it's already a
773 // scratch mirror.
774 //
775 // See java_lang_Class::create_scratch_mirror() for more info.
776 oop HeapShared::scratch_java_mirror(oop java_mirror) {
777 assert(java_lang_Class::is_instance(java_mirror), "must be");
855 if (RegeneratedClasses::is_regenerated_object(ik)) {
856 InstanceKlass* orig_ik = RegeneratedClasses::get_original_object(ik);
857 precond(orig_ik->is_initialized());
858 orig_mirror = orig_ik->java_mirror();
859 } else {
860 precond(ik->is_initialized());
861 orig_mirror = ik->java_mirror();
862 }
863
864 oop m = scratch_java_mirror(ik);
865 int nfields = 0;
866 for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
867 if (fs.access_flags().is_static()) {
868 fieldDescriptor& fd = fs.field_descriptor();
869 int offset = fd.offset();
870 switch (fd.field_type()) {
871 case T_OBJECT:
872 case T_ARRAY:
873 {
874 oop field_obj = orig_mirror->obj_field(offset);
875 m->obj_field_put(offset, field_obj);
876 if (field_obj != nullptr) {
877 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, field_obj);
878 assert(success, "sanity");
879 }
880 }
881 break;
882 case T_BOOLEAN:
883 m->bool_field_put(offset, orig_mirror->bool_field(offset));
884 break;
885 case T_BYTE:
886 m->byte_field_put(offset, orig_mirror->byte_field(offset));
887 break;
888 case T_SHORT:
889 m->short_field_put(offset, orig_mirror->short_field(offset));
890 break;
891 case T_CHAR:
892 m->char_field_put(offset, orig_mirror->char_field(offset));
893 break;
894 case T_INT:
929 // We need to retain the identity_hash, because it may have been used by some hashtables
930 // in the shared heap.
931 if (!orig_mirror->fast_no_hash_check()) {
932 intptr_t src_hash = orig_mirror->identity_hash();
933 if (UseCompactObjectHeaders) {
934 narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
935 scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
936 } else {
937 scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
938 }
939 assert(scratch_m->mark().is_unlocked(), "sanity");
940
941 DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
942 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
943 }
944
945 if (CDSConfig::is_dumping_aot_linked_classes()) {
946 java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
947 java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
948 }
949
950 Klass* k = java_lang_Class::as_Klass(orig_mirror); // is null Universe::void_mirror();
951 if (CDSConfig::is_dumping_reflection_data() &&
952 k != nullptr && k->is_instance_klass() &&
953 java_lang_Class::reflection_data(orig_mirror) != nullptr &&
954 AOTConstantPoolResolver::can_archive_reflection_data(InstanceKlass::cast(k))) {
955 java_lang_Class::set_reflection_data(scratch_m, java_lang_Class::reflection_data(orig_mirror));
956 }
957 }
958
959 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
960 if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
961 objArrayOop rr = src_ik->constants()->resolved_references_or_null();
962 if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
963 return HeapShared::scratch_resolved_references(src_ik->constants());
964 }
965 }
966 return nullptr;
967 }
968
// Archives the shared interned-string array (mapping mode only) and registers
// its root index with the StringTable.
void HeapShared::archive_strings() {
  assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
  oop shared_strings_array = StringTable::init_shared_strings_array();
  bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
  assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
  StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
}
976
987 has_oop_pointers = info->has_oop_pointers();
988 has_native_pointers = info->has_native_pointers();
989 }
990
// Marks the cached info of src_obj (which must be in the cache) as containing
// native pointers.
void HeapShared::set_has_native_pointers(oop src_obj) {
  OopHandle oh(&src_obj); // stack-local handle used only as a lookup key
  CachedOopInfo* info = archived_object_cache()->get(oh);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}
997
998 // Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
999 // should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
// Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
// should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
void HeapShared::start_scanning_for_oops() {
  {
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
    _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (HeapShared::is_writing_mapping_mode() && (UseG1GC || UseCompressedOops)) {
      aot_log_info(aot)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                        UseCompressedOops ? p2i(CompressedOops::begin()) :
                                            p2i((address)G1CollectedHeap::heap()->reserved().start()),
                        UseCompressedOops ? p2i(CompressedOops::end()) :
                                            p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }

    archive_subgraphs();
  }

  // NOTE(review): these run intentionally outside the NoSafepointVerifier
  // scope above -- presumably they may safepoint; confirm before moving them.
  init_seen_objects_table();
  Universe::archive_exception_instances();
}
1026
// Counterpart of start_scanning_for_oops(): archives the interned strings
// (mapping mode only) and tears down the seen-objects table.
void HeapShared::end_scanning_for_oops() {
  if (is_writing_mapping_mode()) {
    archive_strings();
  }
  delete_seen_objects_table();
}
1033
// Verifies the scanned heap and writes it into the AOT cache, using either the
// mapped or the streamed writer depending on the writing mode.
void HeapShared::write_heap(ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) {
  {
    NoSafepointVerifier nsv;
    if (!SkipArchiveHeapVerification) {
      CDSHeapVerifier::verify();
    }
    check_special_subgraph_classes();
  }

  // Resolve the pending root handles into raw oops for the writer.
  GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
  for (int i = 0; i < _pending_roots->length(); i++) {
    roots->append(_pending_roots->at(i).resolve());
  }

  if (HeapShared::is_writing_mapping_mode()) {
    StringTable::write_shared_table();
    AOTMappedHeapWriter::write(roots, mapped_heap_info);
  } else {
    assert(HeapShared::is_writing_streaming_mode(), "are there more modes?");
    AOTStreamedHeapWriter::write(roots, streamed_heap_info);
  }

  delete roots;

  ArchiveBuilder::OtherROAllocMark mark;
  write_subgraph_info_table();
}
1061
// Archives the scratch mirror of orig_mirror (if one exists) and everything
// reachable from it, including any reflection data attached to the mirror.
void HeapShared::scan_java_mirror(oop orig_mirror) {
  oop m = scratch_java_mirror(orig_mirror);
  if (m != nullptr) { // nullptr if for custom class loader
    copy_java_mirror(orig_mirror, m);
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
    assert(success, "sanity");

    oop extra;
    if ((extra = java_lang_Class::reflection_data(m)) != nullptr) {
      success = archive_reachable_objects_from(1, _dump_time_special_subgraph, extra);
      assert(success, "sanity");
    }
  }
}
1076
// Archives the mirror of orig_k and, for instance klasses, the scratch copy
// of its resolved_references array (when one is archivable).
void HeapShared::scan_java_class(Klass* orig_k) {
  scan_java_mirror(orig_k->java_mirror());

  if (orig_k->is_instance_klass()) {
    InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
    orig_ik->constants()->prepare_resolved_references_for_archiving();
    objArrayOop rr = get_archived_resolved_references(orig_ik);
    if (rr != nullptr) {
      bool success = HeapShared::archive_reachable_objects_from(1, _dump_time_special_subgraph, rr);
      assert(success, "must be");
    }
  }
}
1090
1091 void HeapShared::archive_subgraphs() {
1092 assert(CDSConfig::is_dumping_heap(), "must be");
1093
1118 &created);
1119 assert(created, "must not initialize twice");
1120 return info;
1121 }
1122
// Returns the previously-initialized KlassSubGraphInfo for k (dump time only).
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
  assert(info != nullptr, "must have been initialized");
  return info;
}
1129
1130 // Add an entry field to the current KlassSubGraphInfo.
1131 void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
1132 assert(CDSConfig::is_dumping_heap(), "dump time only");
1133 if (_subgraph_entry_fields == nullptr) {
1134 _subgraph_entry_fields =
1135 new (mtClass) GrowableArray<int>(10, mtClass);
1136 }
1137 _subgraph_entry_fields->append(static_field_offset);
1138 if (v == nullptr) {
1139 _subgraph_entry_fields->append(-1);
1140 } else {
1141 _subgraph_entry_fields->append(HeapShared::append_root(v));
1142 }
1143 }
1144
1145 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
1146 // Only objects of boot classes can be included in sub-graph.
1147 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
1148 assert(CDSConfig::is_dumping_heap(), "dump time only");
1149
1150 if (_subgraph_object_klasses == nullptr) {
1151 _subgraph_object_klasses =
1152 new (mtClass) GrowableArray<Klass*>(50, mtClass);
1153 }
1154
1155 if (_k == orig_k) {
1156 // Don't add the Klass containing the sub-graph to it's own klass
1157 // initialization list.
1158 return;
1159 }
1160
1161 if (orig_k->is_instance_klass()) {
1162 #ifdef ASSERT
1439 which, k->external_name());
1440 FlagSetting fs1(VerifyBeforeGC, true);
1441 FlagSetting fs2(VerifyDuringGC, true);
1442 FlagSetting fs3(VerifyAfterGC, true);
1443 Universe::heap()->collect(GCCause::_java_lang_system_gc);
1444 }
1445 }
1446 }
1447
1448 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
1449 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
1450 //
1451 // Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
1452 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
1453 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
// Resolves (without initializing) the classes needed by the archived heap.
// See the comment block above for the ClassFileLoadHook caveat.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!is_archived_heap_in_use()) {
    return; // nothing to do
  }

  // When not using AOT-linked classes, resolve each class recorded in the
  // special subgraph here.
  if (!CDSConfig::is_using_aot_linked_classes()) {
    assert( _run_time_special_subgraph != nullptr, "must be");
    Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
    if (klasses != nullptr) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        ExceptionMark em(current); // no exception can happen here
        resolve_or_init(k, /*do_init*/false, current);
      }
    }
  }

  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
1475
// Resolves the archived subgraph of every entry in the (terminated) fields[]
// table. All root classes are expected to be boot-loader classes.
void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}
1485
// Resolves (without initializing) the classes in k's archived subgraph.
// Pending exceptions are swallowed; if the subgraph record could not be
// loaded, its archived roots are cleared.
void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
   resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == nullptr) {
    clear_archived_roots_of(k);
  }
}
1498
// Core java.lang.invoke classes initialized by initialize_java_lang_invoke()
// and recognized by is_core_java_lang_invoke_klass().
static const char* java_lang_invoke_core_klasses[] = {
  "java/lang/invoke/Invokers$Holder",
  "java/lang/invoke/MethodHandle",
  "java/lang/invoke/MethodHandleNatives",
  "java/lang/invoke/DirectMethodHandle$Holder",
  "java/lang/invoke/DelegatingMethodHandle$Holder",
  "java/lang/invoke/LambdaForm$Holder",
  "java/lang/invoke/BoundMethodHandle$Species_L",
};
1508
1509 void HeapShared::initialize_java_lang_invoke(TRAPS) {
1510 if (CDSConfig::is_using_aot_linked_classes() || CDSConfig::is_dumping_method_handles()) {
1511 int len = sizeof(java_lang_invoke_core_klasses)/sizeof(char*);
1512 for (int i = 0; i < len; i++) {
1513 resolve_or_init(java_lang_invoke_core_klasses[i], true, CHECK);
1514 }
1515 }
1516 }
1517
1518 bool HeapShared::is_core_java_lang_invoke_klass(InstanceKlass* klass) {
1519 // TODO: Crude, rewrite using Symbols or vmClasses instead
1520 ResourceMark rm;
1521 char* s2 = klass->name()->as_C_string();
1522 int len = sizeof(java_lang_invoke_core_klasses)/sizeof(char*);
1523 for (int i = 0; i < len; i++) {
1524 if (strcmp(java_lang_invoke_core_klasses[i], s2) == 0) {
1525 return true;
1526 }
1527 }
1528 return false;
1529 }
1530
1531 // Initialize the InstanceKlasses of objects that are reachable from the following roots:
1532 // - interned strings
1533 // - Klass::java_mirror() -- including aot-initialized mirrors such as those of Enum klasses.
1534 // - ConstantPool::resolved_references()
1535 // - Universe::<xxx>_exception_instance()
1536 //
1537 // For example, if this enum class is initialized at AOT cache assembly time:
1538 //
1539 // enum Fruit {
1540 // APPLE, ORANGE, BANANA;
1541 // static final Set<Fruit> HAVE_SEEDS = new HashSet<>(Arrays.asList(APPLE, ORANGE));
1542 // }
1543 //
1544 // the aot-initialized mirror of Fruit has a static field that references HashSet, which
1545 // should be initialized before any Java code can access the Fruit class. Note that
// HashSet itself doesn't necessarily need to be an aot-initialized class.
1547 void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
1548 if (!is_archived_heap_in_use()) {
1705 ik->initialize(CHECK);
1706 } else if (k->is_objArray_klass()) {
1707 ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1708 oak->initialize(CHECK);
1709 }
1710 }
1711 }
1712
1713 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1714 verify_the_heap(k, "before");
1715
1716 Array<int>* entry_field_records = record->entry_field_records();
1717 if (entry_field_records != nullptr) {
1718 int efr_len = entry_field_records->length();
1719 assert(efr_len % 2 == 0, "sanity");
1720 for (int i = 0; i < efr_len; i += 2) {
1721 int field_offset = entry_field_records->at(i);
1722 int root_index = entry_field_records->at(i+1);
1723 // Load the subgraph entry fields from the record and store them back to
1724 // the corresponding fields within the mirror.
1725 oop v;
1726 if (root_index < 0) {
1727 v = nullptr;
1728 } else {
1729 v = get_root(root_index, /*clear=*/true);
1730 }
1731 oop m = k->java_mirror();
1732 if (k->has_aot_initialized_mirror()) {
1733 assert(v == m->obj_field(field_offset), "must be aot-initialized");
1734 } else {
1735 m->obj_field_put(field_offset, v);
1736 }
1737 log_debug(aot, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
1738 }
1739
1740 // Done. Java code can see the archived sub-graphs referenced from k's
1741 // mirror after this point.
1742 if (log_is_enabled(Info, aot, heap)) {
1743 ResourceMark rm;
1744 log_info(aot, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s%s",
1745 k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "",
1746 k->has_aot_initialized_mirror() ? " (aot-inited)" : "");
1747 }
1748 }
1749
1750 verify_the_heap(k, "after ");
// Builds a CachedOopInfo for obj: stores the referrer in a global OopHandle
// and records whether obj itself contains any oop fields.
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(OopHandle(Universe::vm_global(), referrer), points_to_oops_checker.result());
}
1857
1858 void HeapShared::init_box_classes(TRAPS) {
1859 if (is_archived_heap_in_use()) {
1860 vmClasses::Boolean_klass()->initialize(CHECK);
1861 vmClasses::Character_klass()->initialize(CHECK);
1862 vmClasses::Float_klass()->initialize(CHECK);
1863 vmClasses::Double_klass()->initialize(CHECK);
1864 vmClasses::Byte_klass()->initialize(CHECK);
1865 vmClasses::Short_klass()->initialize(CHECK);
1866 vmClasses::Integer_klass()->initialize(CHECK);
1867 vmClasses::Long_klass()->initialize(CHECK);
1868 vmClasses::Void_klass()->initialize(CHECK);
1869 }
1870 }
1871
// Dumps the recorded archiving context strings (if any) to the error log,
// prints a debug trace, then aborts the dump with an unrecoverable error.
void HeapShared::exit_on_error() {
  if (_context != nullptr) {
    ResourceMark rm;
    LogStream ls(Log(cds, heap)::error());
    ls.print_cr("Context");
    for (int i = 0; i < _context->length(); i++) {
      const char* s = _context->at(i);
      ls.print_cr("- %s", s);
    }
  }
  debug_trace();
  AOTMetaspace::unrecoverable_writing_error();
}
1885
1886 // (1) If orig_obj has not been archived yet, archive it.
1887 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
1888 // trace all objects that are reachable from it, and make sure these objects are archived.
1889 // (3) Record the klasses of all objects that are reachable from orig_obj (including those that
1890 // were already archived when this function is called)
1891 bool HeapShared::archive_reachable_objects_from(int level,
1892 KlassSubGraphInfo* subgraph_info,
1893 oop orig_obj) {
1894 assert(orig_obj != nullptr, "must be");
1895 PendingOopStack stack;
1896 stack.push(PendingOop(orig_obj, nullptr, level));
1897
1898 while (stack.length() > 0) {
1899 PendingOop po = stack.pop();
1900 _object_being_archived = po;
1901 bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
1902 _object_being_archived = PendingOop();
1903
1904 if (!status) {
1905 // Don't archive a subgraph root that's too big. For archives static fields, that's OK
2132 verify_subgraph_from(f);
2133 }
2134 }
2135
// Verifies that every object reachable from orig_obj has been archived
// (skipped entirely when the root itself was not archived).
void HeapShared::verify_subgraph_from(oop orig_obj) {
  if (!has_been_archived(orig_obj)) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj);
  delete_seen_objects_table();
}
2148
// Checks that obj (and, via VerifySharedOopClosure, everything it references)
// has been archived. Mirrors are verified through their scratch copies,
// substituting regenerated classes where applicable.
void HeapShared::verify_reachable_objects_from(oop obj) {
  _num_total_verifications ++;
  if (java_lang_Class::is_instance(obj)) {
    Klass* k = java_lang_Class::as_Klass(obj);
    if (RegeneratedClasses::has_been_regenerated(k)) {
      k = RegeneratedClasses::get_regenerated_object(k);
      obj = k->java_mirror();
    }
    obj = scratch_java_mirror(obj);
    assert(obj != nullptr, "must be");
  }
  // The seen-objects table prevents revisiting shared sub-structures.
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);
    assert(has_been_archived(obj), "must be");
    VerifySharedOopClosure walker;
    obj->oop_iterate(&walker);
  }
}
2167 #endif
2168
2169 void HeapShared::check_special_subgraph_classes() {
2170 if (CDSConfig::is_initing_classes_at_dump_time()) {
2171 // We can have aot-initialized classes (such as Enums) that can reference objects
2172 // of arbitrary types. Currently, we trust the JEP 483 implementation to only
2173 // aot-initialize classes that are "safe".
2174 //
2175 // TODO: we need an automatic tool that checks the safety of aot-initialized
2176 // classes (when we extend the set of aot-initialized classes beyond JEP 483)
2457
// Archives the subgraphs rooted at each static field in fields[]. Consecutive
// rows that share a klass are handled in a single recording pass; in
// non-product builds the recorded subgraphs are verified afterwards.
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the class of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  // Note: the outer loop does not increment i; the inner loop advances it
  // past the run of rows that share the same klass_name.
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    ContextMark cm(klass_name);
    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      // Shadows the outer ContextMark: adds the field name to the context.
      ContextMark cm(f->field_name);
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(aot, heap)("Archived subgraph records = %zu",
                      _num_total_subgraph_recordings);
  log_info(aot, heap)("  Walked %zu objects", _num_total_walked_objs);
  log_info(aot, heap)("  Archived %zu objects", _num_total_archived_objs);
  log_info(aot, heap)("  Recorded %zu klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(aot, heap)("  Verified %zu references", _num_total_verifications);
#endif
}
|