 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassInitializer.hpp"
#include "cds/aotClassLocation.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/aotOopChecker.hpp"
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/aotStreamedHeapLoader.hpp"
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cds_globals.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsEnumKlass.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/javaCalls.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  bool valid() {
    return klass_name != nullptr;
  }
};

// Anything that goes in the header must be thoroughly purged from uninitialized memory
// as it will be written to disk. Therefore, the constructors memset the memory to 0.
// This is not the prettiest thing, but we need to know every byte is initialized,
// including potential padding between fields.

ArchiveMappedHeapHeader::ArchiveMappedHeapHeader(size_t ptrmap_start_pos,
                                                 size_t oopmap_start_pos,
                                                 HeapRootSegments root_segments) {
  memset((char*)this, 0, sizeof(*this));
  _ptrmap_start_pos = ptrmap_start_pos;
  _oopmap_start_pos = oopmap_start_pos;
  _root_segments = root_segments;
}

ArchiveMappedHeapHeader::ArchiveMappedHeapHeader() {
  memset((char*)this, 0, sizeof(*this));
}

ArchiveMappedHeapHeader ArchiveMappedHeapInfo::create_header() {
  return ArchiveMappedHeapHeader{_ptrmap_start_pos,
#endif


//
// If you add new entries to the following tables, you should know what you're doing!
//
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/Long$LongCache",                 "archivedCache"},
  {"java/lang/Byte$ByteCache",                 "archivedCache"},
  {"java/lang/Short$ShortCache",               "archivedCache"},
  {"java/lang/Character$CharacterCache",       "archivedCache"},
  {"java/util/jar/Attributes$Name",            "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",               "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleGraph"},
  {"java/util/ImmutableCollections",           "archivedObjects"},
  {"java/lang/ModuleLayer",                    "EMPTY_LAYER"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",           "archivedCaches"},

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};

// full module graph
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS,                  ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData",            "archivedData"},
  {nullptr, nullptr},
};

KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;

static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
      return true;
    }
  }
  return false;
}

bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (CDSConfig::is_dumping_klass_subgraphs()) {
    // Legacy CDS archive support (to be deprecated)
    return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
           is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
  } else {
    return false;
  }
}

oop HeapShared::CachedOopInfo::orig_referrer() const {
  return _orig_referrer.resolve();
}

unsigned HeapShared::oop_hash(oop const& p) {
  assert(SafepointSynchronize::is_at_safepoint() ||
         JavaThread::current()->is_in_no_safepoint_scope(), "sanity");
  // Do not call p->identity_hash() as that will update the
  // object header.
  return primitive_hash(cast_from_oop<intptr_t>(p));
}
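
// Note: the address-based hash above is only stable while objects cannot move,
// which is exactly what the at-safepoint / no-safepoint-scope assert guarantees.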

unsigned int HeapShared::oop_handle_hash_raw(const OopHandle& oh) {
  return oop_hash(oh.resolve());
}

unsigned int HeapShared::oop_handle_hash(const OopHandle& oh) {
  oop o = oh.resolve();
  if (o == nullptr) {
    return 0;
  } else {
    return o->identity_hash();
  }
}

bool HeapShared::oop_handle_equals(const OopHandle& a, const OopHandle& b) {
  return a.resolve() == b.resolve();
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != nullptr) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != nullptr) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, aot)) {
        ResourceMark rm(THREAD);
        log_debug(aot)("  calling %s", method->name_and_sig_as_C_string());

  // Clean up jdk.internal.loader.ClassLoaders::bootLoader(), which is not
  // directly used for class loading, but rather is used by the core library
  // to keep track of resources, etc., loaded by the null class loader.
  //
  // Note, this object is non-null, and is not the same as
  // ClassLoaderData::the_null_class_loader_data()->class_loader(),
  // which is null.
  log_debug(aot)("Resetting boot loader");
  JavaValue result(T_OBJECT);
  JavaCalls::call_static(&result,
                         vmClasses::jdk_internal_loader_ClassLoaders_klass(),
                         vmSymbols::bootLoader_name(),
                         vmSymbols::void_BuiltinClassLoader_signature(),
                         CHECK);
  Handle boot_loader(THREAD, result.get_oop());
  reset_states(boot_loader(), CHECK);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;

bool HeapShared::is_archived_heap_in_use() {
  if (HeapShared::is_loading()) {
    if (HeapShared::is_loading_streaming_mode()) {
      return AOTStreamedHeapLoader::is_in_use();
    } else {
      return AOTMappedHeapLoader::is_in_use();
    }
  }

  return false;
}
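
// Note: the archived heap can be consumed in two different ways at runtime. It is
// either mapped directly into the Java heap (AOTMappedHeapLoader) or materialized
// object-by-object (AOTStreamedHeapLoader). Only one loader is in use in a given
// run, which is what the streaming/mapping mode checks here and below rely on.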

bool HeapShared::can_use_archived_heap() {
  FileMapInfo* static_mapinfo = FileMapInfo::current_info();
  if (static_mapinfo == nullptr) {
    return false;
  }
  if (!static_mapinfo->has_heap_region()) {
    return false;

void HeapShared::initialize_streaming() {
  assert(is_loading_streaming_mode(), "shouldn't call this");
  if (can_use_archived_heap()) {
    AOTStreamedHeapLoader::initialize();
  }
}

void HeapShared::enable_gc() {
  if (AOTStreamedHeapLoader::is_in_use()) {
    AOTStreamedHeapLoader::enable_gc();
  }
}

void HeapShared::materialize_thread_object() {
  if (AOTStreamedHeapLoader::is_in_use()) {
    AOTStreamedHeapLoader::materialize_thread_object();
  }
}

void HeapShared::add_to_dumped_interned_strings(oop string) {
  assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
  AOTMappedHeapWriter::add_to_dumped_interned_strings(string);
  bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
  assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
}

void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
  if (HeapShared::is_loading()) {
    if (HeapShared::is_loading_streaming_mode()) {
      // Heap initialization can be done only after vtables are initialized by ReadClosure.
      AOTStreamedHeapLoader::finish_initialization(static_mapinfo);
    } else {
      // Finish up archived heap initialization. These must be
      // done after ReadClosure.
      AOTMappedHeapLoader::finish_initialization(static_mapinfo);
    }
  }
}

HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
  OopHandle oh(Universe::vm_global(), obj);
  CachedOopInfo* result = _archived_object_cache->get(oh);
  oh.release(Universe::vm_global());
  return result;
}
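
// The temporary OopHandle above exists only because ArchivedObjectCache is keyed by
// OopHandle; it is released right after the lookup, so no global handle is leaked.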

bool HeapShared::has_been_archived(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  return get_cached_oop_info(obj) != nullptr;
}

int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (obj != nullptr) {
    assert(has_been_archived(obj), "must be");
  }
  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  return _pending_roots->append(obj);
}

oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  assert(is_archived_heap_in_use(), "getting root from an archived heap that is not in use");

  oop result;
  if (HeapShared::is_loading_streaming_mode()) {
    result = AOTStreamedHeapLoader::get_root(index);
  } else {
    assert(HeapShared::is_loading_mapping_mode(), "must be");
    result = AOTMappedHeapLoader::get_root(index);
  }

  if (clear) {
    clear_root(index);
  }

  return result;
}

void HeapShared::finish_materialize_objects() {
  if (AOTStreamedHeapLoader::is_in_use()) {
    AOTStreamedHeapLoader::finish_materialize_objects();
  }
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(CDSConfig::is_using_archive(), "must be");
  if (is_archived_heap_in_use()) {
    if (log_is_enabled(Debug, aot, heap)) {
      log_debug(aot, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(get_root(index, false /* clear */)));
    }
    if (HeapShared::is_loading_streaming_mode()) {
      AOTStreamedHeapLoader::clear_root(index);
    } else {
      assert(HeapShared::is_loading_mapping_mode(), "must be");
      AOTMappedHeapLoader::clear_root(index);
    }
  }
}

bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");
  if (has_been_archived(obj)) {
    // returned from jdk.internal.misc.CDS::initializeFromArchive().
    // See HeapShared::initialize_from_archived_subgraph().
    {
      AOTArtifactFinder::add_aot_inited_class(InstanceKlass::cast(k));
    }

    if (java_lang_Class::is_instance(obj)) {
      Klass* mirror_k = java_lang_Class::as_Klass(obj);
      if (mirror_k != nullptr) {
        AOTArtifactFinder::add_cached_class(mirror_k);
      }
    } else if (java_lang_invoke_ResolvedMethodName::is_instance(obj)) {
      Method* m = java_lang_invoke_ResolvedMethodName::vmtarget(obj);
      if (m != nullptr) {
        if (RegeneratedClasses::has_been_regenerated(m)) {
          m = RegeneratedClasses::get_regenerated_object(m);
        }
        InstanceKlass* method_holder = m->method_holder();
        AOTArtifactFinder::add_cached_class(method_holder);
      }
    }
  }

  if (log_is_enabled(Debug, aot, heap)) {
    ResourceMark rm;
    LogTarget(Debug, aot, heap) log;
    LogStream out(log);
    out.print("Archived heap object " PTR_FORMAT " : %s ",
              p2i(obj), obj->klass()->external_name());
    if (java_lang_Class::is_instance(obj)) {
      Klass* k = java_lang_Class::as_Klass(obj);
      if (k != nullptr) {
        out.print("%s", k->external_name());
      } else {
        out.print("primitive");
      }
    }
    out.cr();
  }

      remove(ptr);
    }
  }
};

void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  if (CDSConfig::is_dumping_preimage_static_archive() && scratch_resolved_references(src) != nullptr) {
    // We are in an AOT training run. The class has been redefined and we are giving it a new
    // resolved_references array. Ignore it, as this class will be excluded from the AOT config.
    return;
  }
  if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
    _scratch_objects_table->set_oop(src, dest);
  }
}

objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_objects_table->get_oop(src);
}

void HeapShared::init_dumping() {
  _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
}

void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
    }
  }
}
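
// The loop bounds above cover every BasicType in [T_BOOLEAN, T_VOID]; the
// is_reference_type() filter then skips T_OBJECT and T_ARRAY, so exactly the
// primitive (and void) mirrors receive a scratch copy.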

// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T. Note
// that java_mirror will be returned if the mirror is already a scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {
  assert(java_lang_Class::is_instance(java_mirror), "must be");

  if (RegeneratedClasses::is_regenerated_object(ik)) {
    InstanceKlass* orig_ik = RegeneratedClasses::get_original_object(ik);
    precond(orig_ik->is_initialized());
    orig_mirror = orig_ik->java_mirror();
  } else {
    precond(ik->is_initialized());
    orig_mirror = ik->java_mirror();
  }

  oop m = scratch_java_mirror(ik);
  int nfields = 0;
  for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) {
      fieldDescriptor& fd = fs.field_descriptor();
      int offset = fd.offset();
      switch (fd.field_type()) {
      case T_OBJECT:
      case T_ARRAY:
        {
          oop field_obj = orig_mirror->obj_field(offset);
          if (offset == java_lang_Class::reflection_data_offset()) {
            // Class::reflectionData uses SoftReference, which cannot be archived. Set it
            // to null; it will be recreated at runtime.
            field_obj = nullptr;
          }
          m->obj_field_put(offset, field_obj);
          if (field_obj != nullptr) {
            bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, field_obj);
            assert(success, "sanity");
          }
        }
        break;
      case T_BOOLEAN:
        m->bool_field_put(offset, orig_mirror->bool_field(offset));
        break;
      case T_BYTE:
        m->byte_field_put(offset, orig_mirror->byte_field(offset));
        break;
      case T_SHORT:
        m->short_field_put(offset, orig_mirror->short_field(offset));
        break;
      case T_CHAR:
        m->char_field_put(offset, orig_mirror->char_field(offset));
        break;
      case T_INT:
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!orig_mirror->fast_no_hash_check()) {
    intptr_t src_hash = orig_mirror->identity_hash();
    if (UseCompactObjectHeaders) {
      narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
      scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(scratch_m->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }

  if (CDSConfig::is_dumping_aot_linked_classes()) {
    java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
    java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
  }
}

static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
  if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
    objArrayOop rr = src_ik->constants()->resolved_references_or_null();
    if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
      return HeapShared::scratch_resolved_references(src_ik->constants());
    }
  }
  return nullptr;
}

int HeapShared::archive_exception_instance(oop exception) {
  bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
  assert(success, "sanity");
  return append_root(exception);
}
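
// The caller here is Universe::archive_exception_instances() (invoked from
// start_scanning_for_oops() below), which is expected to remember the returned
// root index so the pre-allocated exception can be fetched from the root table
// at runtime.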

void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
  OopHandle oh(&src_obj);
  CachedOopInfo* info = archived_object_cache()->get(oh);
  assert(info != nullptr, "must be");
  has_oop_pointers = info->has_oop_pointers();
  has_native_pointers = info->has_native_pointers();
}

void HeapShared::set_has_native_pointers(oop src_obj) {
  OopHandle oh(&src_obj);
  CachedOopInfo* info = archived_object_cache()->get(oh);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}

// Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
// should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
void HeapShared::start_scanning_for_oops() {
  {
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (HeapShared::is_writing_mapping_mode() && (UseG1GC || UseCompressedOops)) {
      aot_log_info(aot)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                        UseCompressedOops ? p2i(CompressedOops::begin()) :
                                            p2i((address)G1CollectedHeap::heap()->reserved().start()),
                        UseCompressedOops ? p2i(CompressedOops::end()) :
                                            p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }

    archive_subgraphs();
  }

  init_seen_objects_table();
  Universe::archive_exception_instances();
}

void HeapShared::end_scanning_for_oops() {
  if (is_writing_mapping_mode()) {
    StringTable::init_shared_table();
  }
  delete_seen_objects_table();
}

void HeapShared::write_heap(ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) {
  {
    NoSafepointVerifier nsv;
    CDSHeapVerifier::verify();
    check_special_subgraph_classes();
  }

  if (HeapShared::is_writing_mapping_mode()) {
    StringTable::write_shared_table();
    AOTMappedHeapWriter::write(_pending_roots, mapped_heap_info);
  } else {
    assert(HeapShared::is_writing_streaming_mode(), "are there more modes?");
    AOTStreamedHeapWriter::write(_pending_roots, streamed_heap_info);
  }

  ArchiveBuilder::OtherROAllocMark mark;
  write_subgraph_info_table();
}

void HeapShared::scan_java_mirror(oop orig_mirror) {
  oop m = scratch_java_mirror(orig_mirror);
  if (m != nullptr) { // nullptr for mirrors of classes loaded by custom class loaders
    copy_java_mirror(orig_mirror, m);
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
    assert(success, "sanity");
  }
}

void HeapShared::scan_java_class(Klass* orig_k) {
  scan_java_mirror(orig_k->java_mirror());

  if (orig_k->is_instance_klass()) {
    InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
    orig_ik->constants()->prepare_resolved_references_for_archiving();
    objArrayOop rr = get_archived_resolved_references(orig_ik);
    if (rr != nullptr) {
      bool success = HeapShared::archive_reachable_objects_from(1, _dump_time_special_subgraph, rr);
      assert(success, "must be");
    }
  }
}

void HeapShared::archive_subgraphs() {
  assert(CDSConfig::is_dumping_heap(), "must be");

                                           &created);
  assert(created, "must not initialize twice");
  return info;
}

KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
  assert(info != nullptr, "must have been initialized");
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  if (_subgraph_entry_fields == nullptr) {
    _subgraph_entry_fields =
      new (mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
}
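
// The growable array built above is a flat list of (static_field_offset, root_index)
// pairs:
//
//   _subgraph_entry_fields = [off0, idx0, off1, idx1, ...]
//
// HeapShared::init_archived_fields_for() consumes the record two entries at a time,
// which is why it asserts that the record length is even.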

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in a sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");

  if (_subgraph_object_klasses == nullptr) {
    _subgraph_object_klasses =
      new (mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  if (_k == orig_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (orig_k->is_instance_klass()) {
#ifdef ASSERT
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}

void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == nullptr) {
    clear_archived_roots_of(k);
  }
}

void HeapShared::initialize_java_lang_invoke(TRAPS) {
  if (CDSConfig::is_using_aot_linked_classes() || CDSConfig::is_dumping_method_handles()) {
    resolve_or_init("java/lang/invoke/Invokers$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/MethodHandle", true, CHECK);
    resolve_or_init("java/lang/invoke/MethodHandleNatives", true, CHECK);
    resolve_or_init("java/lang/invoke/DirectMethodHandle$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/DelegatingMethodHandle$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/LambdaForm$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/BoundMethodHandle$Species_L", true, CHECK);
  }
}

// Initialize the InstanceKlasses of objects that are reachable from the following roots:
//   - interned strings
//   - Klass::java_mirror() -- including aot-initialized mirrors such as those of Enum klasses.
//   - ConstantPool::resolved_references()
//   - Universe::<xxx>_exception_instance()
//
// For example, if this enum class is initialized at AOT cache assembly time:
//
//    enum Fruit {
//       APPLE, ORANGE, BANANA;
//       static final Set<Fruit> HAVE_SEEDS = new HashSet<>(Arrays.asList(APPLE, ORANGE));
//    }
//
// the aot-initialized mirror of Fruit has a static field that references HashSet, which
// should be initialized before any Java code can access the Fruit class. Note that
// HashSet itself doesn't necessarily need to be an aot-initialized class.
void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
  if (!is_archived_heap_in_use()) {
    return;
  }
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}

void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
  verify_the_heap(k, "before");

  Array<int>* entry_field_records = record->entry_field_records();
  if (entry_field_records != nullptr) {
    int efr_len = entry_field_records->length();
    assert(efr_len % 2 == 0, "sanity");
    for (int i = 0; i < efr_len; i += 2) {
      int field_offset = entry_field_records->at(i);
      int root_index = entry_field_records->at(i+1);
      // Load the subgraph entry fields from the record and store them back to
      // the corresponding fields within the mirror.
      oop v = get_root(root_index, /*clear=*/true);
      oop m = k->java_mirror();
      if (k->has_aot_initialized_mirror()) {
        assert(v == m->obj_field(field_offset), "must be aot-initialized");
      } else {
        m->obj_field_put(field_offset, v);
      }
      log_debug(aot, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
    }

    // Done. Java code can see the archived sub-graphs referenced from k's
    // mirror after this point.
    if (log_is_enabled(Info, aot, heap)) {
      ResourceMark rm;
      log_info(aot, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s%s",
                          k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "",
                          k->has_aot_initialized_mirror() ? " (aot-inited)" : "");
    }
  }

  verify_the_heap(k, "after ");
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(OopHandle(Universe::vm_global(), referrer), points_to_oops_checker.result());
}

void HeapShared::init_box_classes(TRAPS) {
  if (is_archived_heap_in_use()) {
    vmClasses::Boolean_klass()->initialize(CHECK);
    vmClasses::Character_klass()->initialize(CHECK);
    vmClasses::Float_klass()->initialize(CHECK);
    vmClasses::Double_klass()->initialize(CHECK);
    vmClasses::Byte_klass()->initialize(CHECK);
    vmClasses::Short_klass()->initialize(CHECK);
    vmClasses::Integer_klass()->initialize(CHECK);
    vmClasses::Long_klass()->initialize(CHECK);
    vmClasses::Void_klass()->initialize(CHECK);
  }
}

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all objects that are reachable from orig_obj (including those that
//     were already archived when this function is called)
bool HeapShared::archive_reachable_objects_from(int level,
                                                KlassSubGraphInfo* subgraph_info,
                                                oop orig_obj) {
  assert(orig_obj != nullptr, "must be");
  PendingOopStack stack;
  stack.push(PendingOop(orig_obj, nullptr, level));

  while (stack.length() > 0) {
    PendingOop po = stack.pop();
    _object_being_archived = po;
    bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
    _object_being_archived = PendingOop();

    if (!status) {
      // Don't archive a subgraph root that's too big. For archived static fields, that's OK
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  if (!has_been_archived(orig_obj)) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj);
  delete_seen_objects_table();
}

void HeapShared::verify_reachable_objects_from(oop obj) {
  _num_total_verifications ++;
  if (java_lang_Class::is_instance(obj)) {
    obj = scratch_java_mirror(obj);
    assert(obj != nullptr, "must be");
  }
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);
    assert(has_been_archived(obj), "must be");
    VerifySharedOopClosure walker;
    obj->oop_iterate(&walker);
  }
}
#endif

void HeapShared::check_special_subgraph_classes() {
  if (CDSConfig::is_dumping_aot_linked_classes()) {
    // We can have aot-initialized classes (such as Enums) that can reference objects
    // of arbitrary types. Currently, we trust the JEP 483 implementation to only
    // aot-initialize classes that are "safe".
    //
    // TODO: we need an automatic tool that checks the safety of aot-initialized
    // classes (when we extend the set of aot-initialized classes beyond JEP 483)

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(aot, heap)("Archived subgraph records = %zu",
                      _num_total_subgraph_recordings);
  log_info(aot, heap)("  Walked %zu objects", _num_total_walked_objs);
  log_info(aot, heap)("  Archived %zu objects", _num_total_archived_objs);
  log_info(aot, heap)("  Recorded %zu klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(aot, heap)("  Verified %zu references", _num_total_verifications);
#endif
}

bool HeapShared::is_dumped_interned_string(oop o) {
  if (is_writing_mapping_mode()) {
    return AOTMappedHeapWriter::is_dumped_interned_string(o);
  } else {
    return AOTStreamedHeapWriter::is_dumped_interned_string(o);
  }
}

// These tables should be used only within the CDS safepoint, so
// delete them before we exit the safepoint. Otherwise the table will
// contain bad oops after a GC.
void HeapShared::delete_tables_with_raw_oops() {
  assert(_seen_objects_table == nullptr, "should have been deleted");

  if (is_writing_mapping_mode()) {
    AOTMappedHeapWriter::delete_tables_with_raw_oops();
  } else {
    assert(is_writing_streaming_mode(), "what other mode?");
    AOTStreamedHeapWriter::delete_tables_with_raw_oops();
  }
}

void HeapShared::debug_trace() {
  ResourceMark rm;
  oop referrer = _object_being_archived.referrer();
  if (referrer != nullptr) {
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotArtifactFinder.hpp"
#include "cds/aotCacheAccess.hpp"
#include "cds/aotClassInitializer.hpp"
#include "cds/aotClassLocation.hpp"
#include "cds/aotConstantPoolResolver.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/aotOopChecker.hpp"
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/aotStreamedHeapLoader.hpp"
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cds_globals.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsEnumKlass.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/aotCodeCache.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/javaCalls.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  bool valid() {
    return klass_name != nullptr;
  }
};

class HeapShared::ContextMark : public StackObj {
  ResourceMark rm;
public:
  ContextMark(const char* c) : rm{} {
    _context->push(c);
  }
  ~ContextMark() {
    _context->pop();
  }
};
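
// ContextMark is an RAII helper: the constructor pushes a label onto the _context
// stack and the destructor pops it, presumably so that diagnostic code can report
// where in the object-scanning process a failure occurred. The embedded ResourceMark
// bounds any resource-area allocations made while the label is live.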

// Anything that goes in the header must be thoroughly purged from uninitialized memory
// as it will be written to disk. Therefore, the constructors memset the memory to 0.
// This is not the prettiest thing, but we need to know every byte is initialized,
// including potential padding between fields.

ArchiveMappedHeapHeader::ArchiveMappedHeapHeader(size_t ptrmap_start_pos,
                                                 size_t oopmap_start_pos,
                                                 HeapRootSegments root_segments) {
  memset((char*)this, 0, sizeof(*this));
  _ptrmap_start_pos = ptrmap_start_pos;
  _oopmap_start_pos = oopmap_start_pos;
  _root_segments = root_segments;
}

ArchiveMappedHeapHeader::ArchiveMappedHeapHeader() {
  memset((char*)this, 0, sizeof(*this));
}

ArchiveMappedHeapHeader ArchiveMappedHeapInfo::create_header() {
  return ArchiveMappedHeapHeader{_ptrmap_start_pos,
#endif


//
// If you add new entries to the following tables, you should know what you're doing!
//

static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/Long$LongCache",                 "archivedCache"},
  {"java/lang/Byte$ByteCache",                 "archivedCache"},
  {"java/lang/Short$ShortCache",               "archivedCache"},
  {"java/lang/Character$CharacterCache",       "archivedCache"},
  {"java/util/jar/Attributes$Name",            "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",               "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleGraph"},
  {"java/util/ImmutableCollections",           "archivedObjects"},
  {"java/lang/ModuleLayer",                    "EMPTY_LAYER"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",           "archivedCaches"},
  {"java/lang/reflect/Proxy$ProxyBuilder",     "archivedData"}, // FIXME -- requires AOTClassLinking

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};

// full module graph
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS,                  ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData",            "archivedData"},
  {nullptr, nullptr},
};

KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;

static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
      return true;
    }
  }
  return false;
}

bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (CDSConfig::is_dumping_klass_subgraphs()) {
    // Legacy CDS archive support (to be deprecated)
    return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
           is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
  } else {
    return false;
  }
}

oop HeapShared::CachedOopInfo::orig_referrer() const {
  return _orig_referrer.resolve();
}

unsigned HeapShared::oop_hash(oop const& p) {
  assert(SafepointSynchronize::is_at_safepoint() ||
         JavaThread::current()->is_in_no_safepoint_scope(), "sanity");
  // Do not call p->identity_hash() as that will update the
  // object header.
  return primitive_hash(cast_from_oop<intptr_t>(p));
}

// About the hashcode of the cached objects:
// - If a source object has a hashcode, it must be copied into the cache.
//   That's because some cached hashtables are laid out using this hashcode.
// - If a source object doesn't have a hashcode, we avoid computing one while
//   copying the object into the cache. This allows the hashcode to be
//   dynamically and randomly computed in each production run, which is generally
//   desirable to make the hashcodes more random between runs.
unsigned HeapShared::archived_object_cache_hash(OopHandle const& oh) {
  oop o = oh.resolve();
  if (_use_identity_hash_for_archived_object_cache) {
    // This is called after all objects are copied. It's OK to update
    // the object's hashcode.
    //
    // This may be called after we have left the AOT dumping safepoint.
    // Objects in archived_object_cache() may be moved by the GC, so we
    // can't use the address of o for computing the hash.
    if (o == nullptr) {
      return 0;
    } else {
      return o->identity_hash();
    }
  } else {
    // This is called while we are copying the objects. Don't call o->identity_hash()
    // as that will update the object header.
    return oop_hash(o);
  }
}

bool HeapShared::oop_handle_equals(const OopHandle& a, const OopHandle& b) {
  return a.resolve() == b.resolve();
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != nullptr) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != nullptr) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, aot)) {
        ResourceMark rm(THREAD);
        log_debug(aot)("  calling %s", method->name_and_sig_as_C_string());

  // Clean up jdk.internal.loader.ClassLoaders::bootLoader(), which is not
  // directly used for class loading, but rather is used by the core library
  // to keep track of resources, etc., loaded by the null class loader.
  //
  // Note, this object is non-null, and is not the same as
  // ClassLoaderData::the_null_class_loader_data()->class_loader(),
  // which is null.
  log_debug(aot)("Resetting boot loader");
  JavaValue result(T_OBJECT);
  JavaCalls::call_static(&result,
                         vmClasses::jdk_internal_loader_ClassLoaders_klass(),
                         vmSymbols::bootLoader_name(),
                         vmSymbols::void_BuiltinClassLoader_signature(),
                         CHECK);
  Handle boot_loader(THREAD, result.get_oop());
  reset_states(boot_loader(), CHECK);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
bool HeapShared::_use_identity_hash_for_archived_object_cache = false;

bool HeapShared::is_archived_heap_in_use() {
  if (HeapShared::is_loading()) {
    if (HeapShared::is_loading_streaming_mode()) {
      return AOTStreamedHeapLoader::is_in_use();
    } else {
      return AOTMappedHeapLoader::is_in_use();
    }
  }

  return false;
}

bool HeapShared::can_use_archived_heap() {
  FileMapInfo* static_mapinfo = FileMapInfo::current_info();
  if (static_mapinfo == nullptr) {
    return false;
  }
  if (!static_mapinfo->has_heap_region()) {
    return false;

void HeapShared::initialize_streaming() {
  assert(is_loading_streaming_mode(), "shouldn't call this");
  if (can_use_archived_heap()) {
    AOTStreamedHeapLoader::initialize();
  }
}

void HeapShared::enable_gc() {
  if (AOTStreamedHeapLoader::is_in_use()) {
    AOTStreamedHeapLoader::enable_gc();
  }
}

void HeapShared::materialize_thread_object() {
  if (AOTStreamedHeapLoader::is_in_use()) {
    AOTStreamedHeapLoader::materialize_thread_object();
  }
}

void HeapShared::archive_interned_string(oop string) {
  assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
  bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
  assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
}

void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
  if (HeapShared::is_loading()) {
    if (HeapShared::is_loading_streaming_mode()) {
      // Heap initialization can be done only after vtables are initialized by ReadClosure.
      AOTStreamedHeapLoader::finish_initialization(static_mapinfo);
    } else {
      // Finish up archived heap initialization. These must be
      // done after ReadClosure.
      AOTMappedHeapLoader::finish_initialization(static_mapinfo);
    }
  }
}

void HeapShared::make_archived_object_cache_gc_safe() {
  ArchivedObjectCache* new_cache = new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);

  // It's safe to change the behavior of the hash function now, because iterate_all()
  // doesn't call the hash function.
  _use_identity_hash_for_archived_object_cache = true;

  // Copy all CachedOopInfo into a new table using a different hashing algorithm
  archived_object_cache()->iterate_all([&] (OopHandle oh, CachedOopInfo info) {
    new_cache->put_when_absent(oh, info);
  });

  destroy_archived_object_cache();
  _archived_object_cache = new_cache;
}
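
// Why the rehash above is needed: while dumping inside the safepoint, cache keys are
// hashed by object address (oop_hash). Once we leave the safepoint the GC may move
// objects and those hashes go stale, so every entry is re-inserted under the
// GC-stable identity_hash() instead.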

HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
  OopHandle oh(Universe::vm_global(), obj);
  CachedOopInfo* result = _archived_object_cache->get(oh);
  oh.release(Universe::vm_global());
  return result;
}

bool HeapShared::has_been_archived(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  return get_cached_oop_info(obj) != nullptr;
}

int HeapShared::append_root(oop obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "sanity");
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  assert(_pending_roots != nullptr, "sanity");

  if (obj == nullptr) {
    assert(_pending_roots->at(0) == nullptr, "root index 0 is always null");
    return 0;
  } else if (CDSConfig::is_dumping_aot_linked_classes()) {
    // The AOT compiler may refer to the same obj many times, so we
    // should use the same index for this oop to avoid excessive entries
    // in the roots array.
    CachedOopInfo* obj_info = get_cached_oop_info(obj);
    assert(obj_info != nullptr, "must be archived");

    if (obj_info->root_index() > 0) {
      return obj_info->root_index();
    } else {
      int i = _pending_roots->append(obj);
      obj_info->set_root_index(i);
      return i;
    }
  } else {
    return _pending_roots->append(obj);
  }
}
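
// Root index 0 is reserved for null (init_dumping() seeds _pending_roots with a null
// entry), so a root_index() greater than zero is enough to tell that a non-null oop
// already has a slot in the roots array.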

int HeapShared::get_root_index(oop obj) {
  if (!CDSConfig::is_dumping_heap()) {
    return -1; // Called by the Leyden old workflow
  }

  if (java_lang_Class::is_instance(obj)) {
    obj = scratch_java_mirror(obj);
  }

  CachedOopInfo* obj_info = get_cached_oop_info(obj);
  const char* error = nullptr;
  if (obj_info == nullptr) {
    error = "Not a cached oop";
  } else if (obj_info->root_index() < 0) {
    error = "Not a cached oop root";
  } else {
    return obj_info->root_index();
  }

  ResourceMark rm;
  log_debug(aot, codecache, oops)("%s: " INTPTR_FORMAT " (%s)", error,
                                  cast_from_oop<uintptr_t>(obj),
                                  obj->klass()->external_name());
  return -1;
}

oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  assert(is_archived_heap_in_use(), "getting root from an archived heap that is not in use");

  oop result;
  if (HeapShared::is_loading_streaming_mode()) {
    result = AOTStreamedHeapLoader::get_root(index);
  } else {
    assert(HeapShared::is_loading_mapping_mode(), "must be");
    result = AOTMappedHeapLoader::get_root(index);
  }

  if (clear) {
    clear_root(index);
  }

  return result;
}

void HeapShared::finish_materialize_objects() {
  if (AOTStreamedHeapLoader::is_in_use()) {
    AOTStreamedHeapLoader::finish_materialize_objects();
  }
}

void HeapShared::clear_root(int index) {
  if (CDSConfig::is_using_aot_linked_classes()) {
    // When AOT linked classes are in use, all roots will be in use all
    // the time; there's no benefit to clearing the roots. Also, we
    // can't clear the roots as they can be shared.
    return;
  }

  assert(index >= 0, "sanity");
  assert(CDSConfig::is_using_archive(), "must be");
  if (is_archived_heap_in_use()) {
    if (log_is_enabled(Debug, aot, heap)) {
      log_debug(aot, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(get_root(index, false /* clear */)));
    }
    if (HeapShared::is_loading_streaming_mode()) {
      AOTStreamedHeapLoader::clear_root(index);
    } else {
      assert(HeapShared::is_loading_mapping_mode(), "must be");
      AOTMappedHeapLoader::clear_root(index);
    }
  }
}

bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");
  if (has_been_archived(obj)) {
    // returned from jdk.internal.misc.CDS::initializeFromArchive().
    // See HeapShared::initialize_from_archived_subgraph().
    {
      AOTArtifactFinder::add_aot_inited_class(InstanceKlass::cast(k));
    }

    if (java_lang_Class::is_instance(obj)) {
      Klass* mirror_k = java_lang_Class::as_Klass(obj);
      if (mirror_k != nullptr) {
        AOTArtifactFinder::add_cached_class(mirror_k);
      }
    } else if (java_lang_invoke_ResolvedMethodName::is_instance(obj)) {
      Method* m = java_lang_invoke_ResolvedMethodName::vmtarget(obj);
      if (m != nullptr) {
        if (RegeneratedClasses::has_been_regenerated(m)) {
          m = RegeneratedClasses::get_regenerated_object(m);
        }
        InstanceKlass* method_holder = m->method_holder();
        AOTArtifactFinder::add_cached_class(method_holder);
      }
    } else if (AOTCodeCache::is_dumping_code() &&
               (java_lang_invoke_MethodHandle::is_instance(obj) || is_interned_string(obj))) {
      // Needed by AOT compiler.
      append_root(obj);
    }
  }

  if (log_is_enabled(Debug, aot, heap)) {
    ResourceMark rm;
    LogTarget(Debug, aot, heap) log;
    LogStream out(log);
    out.print("Archived heap object " PTR_FORMAT " : %s ",
              p2i(obj), obj->klass()->external_name());
    if (java_lang_Class::is_instance(obj)) {
      Klass* k = java_lang_Class::as_Klass(obj);
      if (k != nullptr) {
        out.print("%s", k->external_name());
      } else {
        out.print("primitive");
      }
    }
    out.cr();
  }

      remove(ptr);
    }
  }
};

void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  if (CDSConfig::is_dumping_preimage_static_archive() && scratch_resolved_references(src) != nullptr) {
    // We are in an AOT training run. The class has been redefined and we are giving it a new
    // resolved_references array. Ignore it, as this class will be excluded from the AOT config.
    return;
  }
  if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
    _scratch_objects_table->set_oop(src, dest);
  }
}

objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_objects_table->get_oop(src);
}

void HeapShared::init_dumping() {
  _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  _pending_roots->append(nullptr); // root index 0 represents a null oop
}

void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
    }
  }
}

// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T. Note
// that java_mirror will be returned if the mirror is already a scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {
  assert(java_lang_Class::is_instance(java_mirror), "must be");

  if (RegeneratedClasses::is_regenerated_object(ik)) {
    InstanceKlass* orig_ik = RegeneratedClasses::get_original_object(ik);
    precond(orig_ik->is_initialized());
    orig_mirror = orig_ik->java_mirror();
  } else {
    precond(ik->is_initialized());
    orig_mirror = ik->java_mirror();
  }

  oop m = scratch_java_mirror(ik);
  int nfields = 0;
  for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) {
      fieldDescriptor& fd = fs.field_descriptor();
      int offset = fd.offset();
      switch (fd.field_type()) {
      case T_OBJECT:
      case T_ARRAY:
        {
          oop field_obj = orig_mirror->obj_field(offset);
          m->obj_field_put(offset, field_obj);
          if (field_obj != nullptr) {
            bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, field_obj);
            assert(success, "sanity");
          }
        }
        break;
      case T_BOOLEAN:
        m->bool_field_put(offset, orig_mirror->bool_field(offset));
        break;
      case T_BYTE:
        m->byte_field_put(offset, orig_mirror->byte_field(offset));
        break;
      case T_SHORT:
        m->short_field_put(offset, orig_mirror->short_field(offset));
        break;
      case T_CHAR:
        m->char_field_put(offset, orig_mirror->char_field(offset));
        break;
      case T_INT:
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!orig_mirror->fast_no_hash_check()) {
    intptr_t src_hash = orig_mirror->identity_hash();
    if (UseCompactObjectHeaders) {
      narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
      scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(scratch_m->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }

  if (CDSConfig::is_dumping_aot_linked_classes()) {
    java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
    java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
  }

  Klass* k = java_lang_Class::as_Klass(orig_mirror); // null for Universe::void_mirror()
  if (CDSConfig::is_dumping_reflection_data() &&
      k != nullptr && k->is_instance_klass() &&
      java_lang_Class::reflection_data(orig_mirror) != nullptr &&
      AOTConstantPoolResolver::can_archive_reflection_data(InstanceKlass::cast(k))) {
    java_lang_Class::set_reflection_data(scratch_m, java_lang_Class::reflection_data(orig_mirror));
  }
}
932
933 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
934 if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
935 objArrayOop rr = src_ik->constants()->resolved_references_or_null();
936 if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
937 return HeapShared::scratch_resolved_references(src_ik->constants());
938 }
939 }
940 return nullptr;
941 }
942
943 int HeapShared::archive_exception_instance(oop exception) {
944 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
945 assert(success, "sanity");
946 return append_root(exception);
947 }
948
949 void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
950 OopHandle oh(&src_obj);
951 CachedOopInfo* info = archived_object_cache()->get(oh);
952 assert(info != nullptr, "must be");
953 has_oop_pointers = info->has_oop_pointers();
954 has_native_pointers = info->has_native_pointers();
955 }
956
957 void HeapShared::set_has_native_pointers(oop src_obj) {
958 OopHandle oh(&src_obj);
959 CachedOopInfo* info = archived_object_cache()->get(oh);
960 assert(info != nullptr, "must be");
961 info->set_has_native_pointers();
962 }
963
964 // Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
965 // should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
966 void HeapShared::start_scanning_for_oops() {
967 {
968 NoSafepointVerifier nsv;
969
970 // The special subgraph doesn't belong to any class. We use Object_klass() here just
971 // for convenience.
972 _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
973 _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);
974
975 // Cache for recording where the archived objects are copied to
976 create_archived_object_cache();
977
978 if (HeapShared::is_writing_mapping_mode() && (UseG1GC || UseCompressedOops)) {
979 aot_log_info(aot)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
980 UseCompressedOops ? p2i(CompressedOops::begin()) :
981 p2i((address)G1CollectedHeap::heap()->reserved().start()),
982 UseCompressedOops ? p2i(CompressedOops::end()) :
983 p2i((address)G1CollectedHeap::heap()->reserved().end()));
984 }
985
986 archive_subgraphs();
987 }
988
989 init_seen_objects_table();
990 Universe::archive_exception_instances();
991 }
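// Expected call protocol (illustrative; the actual sequencing is owned by
// AOTArtifactFinder):
//
//   HeapShared::start_scanning_for_oops();
//   ... scan_java_class() / scan_java_mirror() for each discovered class ...
//   HeapShared::end_scanning_for_oops();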
992
993 void HeapShared::end_scanning_for_oops() {
994 if (is_writing_mapping_mode()) {
995 StringTable::init_shared_table();
996 }
997 delete_seen_objects_table();
998 }
999
1000 void HeapShared::write_heap(ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) {
1001 {
1002 NoSafepointVerifier nsv;
1003 if (!SkipArchiveHeapVerification) {
1004 CDSHeapVerifier::verify();
1005 }
1006 check_special_subgraph_classes();
1007 }
1008
1009 if (HeapShared::is_writing_mapping_mode()) {
1010 StringTable::write_shared_table();
1011 AOTMappedHeapWriter::write(_pending_roots, mapped_heap_info);
1012 } else {
1013 assert(HeapShared::is_writing_streaming_mode(), "are there more modes?");
1014 AOTStreamedHeapWriter::write(_pending_roots, streamed_heap_info);
1015 }
1016
1017 ArchiveBuilder::OtherROAllocMark mark;
1018 write_subgraph_info_table();
1019
1020 delete _pending_roots;
1021 _pending_roots = nullptr;
1022
1023 make_archived_object_cache_gc_safe();
1024 }
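// Exactly one of mapped_heap_info / streamed_heap_info is filled in above,
// depending on the writing mode; the subgraph info table and the GC-safe
// object cache are produced in both modes.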
1025
1026 void HeapShared::scan_java_mirror(oop orig_mirror) {
1027 oop m = scratch_java_mirror(orig_mirror);
1028 if (m != nullptr) { // nullptr for classes of custom class loaders
1029 copy_java_mirror(orig_mirror, m);
1030 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
1031 assert(success, "sanity");
1032
1033 oop extra;
1034 if ((extra = java_lang_Class::reflection_data(m)) != nullptr) {
1035 success = archive_reachable_objects_from(1, _dump_time_special_subgraph, extra);
1036 assert(success, "sanity");
1037 }
1038 }
1039 }
1040
1041 void HeapShared::scan_java_class(Klass* orig_k) {
1042 scan_java_mirror(orig_k->java_mirror());
1043
1044 if (orig_k->is_instance_klass()) {
1045 InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
1046 orig_ik->constants()->prepare_resolved_references_for_archiving();
1047 objArrayOop rr = get_archived_resolved_references(orig_ik);
1048 if (rr != nullptr) {
1049 bool success = HeapShared::archive_reachable_objects_from(1, _dump_time_special_subgraph, rr);
1050 assert(success, "must be");
1051 }
1052 }
1053 }
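// Hedged note on the flow above: prepare_resolved_references_for_archiving()
// is expected to leave the constant pool's resolved_references in an
// archivable state, after which the scratch copy (if any) is traced into the
// special subgraph like any other root.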
1054
1055 void HeapShared::archive_subgraphs() {
1056 assert(CDSConfig::is_dumping_heap(), "must be");
1057
1082 &created);
1083 assert(created, "must not initialize twice");
1084 return info;
1085 }
1086
1087 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
1088 assert(CDSConfig::is_dumping_heap(), "dump time only");
1089 KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
1090 assert(info != nullptr, "must have been initialized");
1091 return info;
1092 }
1093
1094 // Add an entry field to the current KlassSubGraphInfo.
1095 void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
1096 assert(CDSConfig::is_dumping_heap(), "dump time only");
1097 if (_subgraph_entry_fields == nullptr) {
1098 _subgraph_entry_fields =
1099 new (mtClass) GrowableArray<int>(10, mtClass);
1100 }
1101 _subgraph_entry_fields->append(static_field_offset);
1102 if (v == nullptr) {
1103 _subgraph_entry_fields->append(-1);
1104 } else {
1105 _subgraph_entry_fields->append(HeapShared::append_root(v));
1106 }
1107 }
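// The resulting array is a flat list of (static_field_offset, root_index)
// pairs, with -1 encoding a null value. init_archived_fields_for() consumes
// the same pairwise layout (hence its "efr_len % 2 == 0" assert).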
1108
1109 // Add the Klass* of an object in the current KlassSubGraphInfo's subgraphs.
1110 // Only objects of boot classes can be included in a sub-graph.
1111 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
1112 assert(CDSConfig::is_dumping_heap(), "dump time only");
1113
1114 if (_subgraph_object_klasses == nullptr) {
1115 _subgraph_object_klasses =
1116 new (mtClass) GrowableArray<Klass*>(50, mtClass);
1117 }
1118
1119 if (_k == orig_k) {
1120 // Don't add the Klass containing the sub-graph to its own klass
1121 // initialization list.
1122 return;
1123 }
1124
1125 if (orig_k->is_instance_klass()) {
1126 #ifdef ASSERT
1429 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1430 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1431 assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
1432 resolve_classes_for_subgraph_of(current, k);
1433 }
1434 }
1435
1436 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1437 JavaThread* THREAD = current;
1438 ExceptionMark em(THREAD);
1439 const ArchivedKlassSubGraphInfoRecord* record =
1440 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1441 if (HAS_PENDING_EXCEPTION) {
1442 CLEAR_PENDING_EXCEPTION;
1443 }
1444 if (record == nullptr) {
1445 clear_archived_roots_of(k);
1446 }
1447 }
1448
1449 static const char* java_lang_invoke_core_klasses[] = {
1450 "java/lang/invoke/Invokers$Holder",
1451 "java/lang/invoke/MethodHandle",
1452 "java/lang/invoke/MethodHandleNatives",
1453 "java/lang/invoke/DirectMethodHandle$Holder",
1454 "java/lang/invoke/DelegatingMethodHandle$Holder",
1455 "java/lang/invoke/LambdaForm$Holder",
1456 "java/lang/invoke/BoundMethodHandle$Species_L",
1457 };
1458
1459 void HeapShared::initialize_java_lang_invoke(TRAPS) {
1460 if (CDSConfig::is_using_aot_linked_classes() || CDSConfig::is_dumping_method_handles()) {
1461 int len = sizeof(java_lang_invoke_core_klasses)/sizeof(char*);
1462 for (int i = 0; i < len; i++) {
1463 resolve_or_init(java_lang_invoke_core_klasses[i], true, CHECK);
1464 }
1465 }
1466 }
1467
1468 bool HeapShared::is_core_java_lang_invoke_klass(InstanceKlass* klass) {
1469 // TODO: Crude, rewrite using Symbols or vmClasses instead
1470 ResourceMark rm;
1471 char* s2 = klass->name()->as_C_string();
1472 int len = sizeof(java_lang_invoke_core_klasses)/sizeof(char*);
1473 for (int i = 0; i < len; i++) {
1474 if (strcmp(java_lang_invoke_core_klasses[i], s2) == 0) {
1475 return true;
1476 }
1477 }
1478 return false;
1479 }
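// One possible shape for the TODO above (hypothetical sketch; _core_invoke_syms
// and CORE_INVOKE_KLASS_COUNT are assumed names, with the Symbols created once
// via SymbolTable::new_symbol()): Symbols are interned, so the check can be a
// pointer comparison with no ResourceMark or C-string conversion:
//
//   for (int i = 0; i < CORE_INVOKE_KLASS_COUNT; i++) {
//     if (klass->name() == _core_invoke_syms[i]) return true;
//   }
//   return false;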
1480
1481 // Initialize the InstanceKlasses of objects that are reachable from the following roots:
1482 // - interned strings
1483 // - Klass::java_mirror() -- including aot-initialized mirrors such as those of Enum klasses.
1484 // - ConstantPool::resolved_references()
1485 // - Universe::<xxx>_exception_instance()
1486 //
1487 // For example, if this enum class is initialized at AOT cache assembly time:
1488 //
1489 // enum Fruit {
1490 // APPLE, ORANGE, BANANA;
1491 // static final Set<Fruit> HAVE_SEEDS = new HashSet<>(Arrays.asList(APPLE, ORANGE));
1492 // }
1493 //
1494 // the aot-initialized mirror of Fruit has a static field that references HashSet, which
1495 // should be initialized before any Java code can access the Fruit class. Note that
1496 // HashSet itself doesn't necessarily need to be an aot-initialized class.
1497 void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
1498 if (!is_archived_heap_in_use()) {
1499 return;
1500 }
1655 ik->initialize(CHECK);
1656 } else if (k->is_objArray_klass()) {
1657 ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1658 oak->initialize(CHECK);
1659 }
1660 }
1661 }
1662
1663 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1664 verify_the_heap(k, "before");
1665
1666 Array<int>* entry_field_records = record->entry_field_records();
1667 if (entry_field_records != nullptr) {
1668 int efr_len = entry_field_records->length();
1669 assert(efr_len % 2 == 0, "sanity");
1670 for (int i = 0; i < efr_len; i += 2) {
1671 int field_offset = entry_field_records->at(i);
1672 int root_index = entry_field_records->at(i+1);
1673 // Load the subgraph entry fields from the record and store them back to
1674 // the corresponding fields within the mirror.
1675 oop v;
1676 if (root_index < 0) {
1677 v = nullptr;
1678 } else {
1679 v = get_root(root_index, /*clear=*/true);
1680 }
1681 oop m = k->java_mirror();
1682 if (k->has_aot_initialized_mirror()) {
1683 assert(v == m->obj_field(field_offset), "must be aot-initialized");
1684 } else {
1685 m->obj_field_put(field_offset, v);
1686 }
1687 log_debug(aot, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
1688 }
1689
1690 // Done. Java code can see the archived sub-graphs referenced from k's
1691 // mirror after this point.
1692 if (log_is_enabled(Info, aot, heap)) {
1693 ResourceMark rm;
1694 log_info(aot, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s%s",
1695 k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "",
1696 k->has_aot_initialized_mirror() ? " (aot-inited)" : "");
1697 }
1698 }
1699
1700 verify_the_heap(k, "after ");
1802 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
1803 PointsToOopsChecker points_to_oops_checker;
1804 obj->oop_iterate(&points_to_oops_checker);
1805 return CachedOopInfo(OopHandle(Universe::vm_global(), referrer), points_to_oops_checker.result());
1806 }
1807
1808 void HeapShared::init_box_classes(TRAPS) {
1809 if (is_archived_heap_in_use()) {
1810 vmClasses::Boolean_klass()->initialize(CHECK);
1811 vmClasses::Character_klass()->initialize(CHECK);
1812 vmClasses::Float_klass()->initialize(CHECK);
1813 vmClasses::Double_klass()->initialize(CHECK);
1814 vmClasses::Byte_klass()->initialize(CHECK);
1815 vmClasses::Short_klass()->initialize(CHECK);
1816 vmClasses::Integer_klass()->initialize(CHECK);
1817 vmClasses::Long_klass()->initialize(CHECK);
1818 vmClasses::Void_klass()->initialize(CHECK);
1819 }
1820 }
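// Presumably the box classes are initialized eagerly here so that their
// <clinit>-created value caches (e.g. Integer$IntegerCache) are in place
// before archived objects referencing boxed values become visible to Java code.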
1821
1822 void HeapShared::exit_on_error() {
1823 if (_context != nullptr) {
1824 ResourceMark rm;
1825 LogStream ls(Log(cds, heap)::error());
1826 ls.print_cr("Context");
1827 for (int i = 0; i < _context->length(); i++) {
1828 const char* s = _context->at(i);
1829 ls.print_cr("- %s", s);
1830 }
1831 }
1832 debug_trace();
1833 AOTMetaspace::unrecoverable_writing_error();
1834 }
1835
1836 // (1) If orig_obj has not been archived yet, archive it.
1837 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
1838 // trace all objects that are reachable from it, and make sure these objects are archived.
1839 // (3) Record the klasses of all objects that are reachable from orig_obj (including those that
1840 // were already archived when this function is called)
1841 bool HeapShared::archive_reachable_objects_from(int level,
1842 KlassSubGraphInfo* subgraph_info,
1843 oop orig_obj) {
1844 assert(orig_obj != nullptr, "must be");
1845 PendingOopStack stack;
1846 stack.push(PendingOop(orig_obj, nullptr, level));
1847
1848 while (stack.length() > 0) {
1849 PendingOop po = stack.pop();
1850 _object_being_archived = po;
1851 bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
1852 _object_being_archived = PendingOop();
1853
1854 if (!status) {
1855 // Don't archive a subgraph root that's too big. For archived static fields, that's OK
2082 verify_subgraph_from(f);
2083 }
2084 }
2085
2086 void HeapShared::verify_subgraph_from(oop orig_obj) {
2087 if (!has_been_archived(orig_obj)) {
2088 // It's OK for the root of a subgraph to be not archived. See comments in
2089 // archive_reachable_objects_from().
2090 return;
2091 }
2092
2093 // Verify that all objects reachable from orig_obj are archived.
2094 init_seen_objects_table();
2095 verify_reachable_objects_from(orig_obj);
2096 delete_seen_objects_table();
2097 }
2098
2099 void HeapShared::verify_reachable_objects_from(oop obj) {
2100 _num_total_verifications++;
2101 if (java_lang_Class::is_instance(obj)) {
2102 Klass* k = java_lang_Class::as_Klass(obj);
2103 if (RegeneratedClasses::has_been_regenerated(k)) {
2104 k = RegeneratedClasses::get_regenerated_object(k);
2105 obj = k->java_mirror();
2106 }
2107 obj = scratch_java_mirror(obj);
2108 assert(obj != nullptr, "must be");
2109 }
2110 if (!has_been_seen_during_subgraph_recording(obj)) {
2111 set_has_been_seen_during_subgraph_recording(obj);
2112 assert(has_been_archived(obj), "must be");
2113 VerifySharedOopClosure walker;
2114 obj->oop_iterate(&walker);
2115 }
2116 }
2117 #endif
2118
2119 void HeapShared::check_special_subgraph_classes() {
2120 if (CDSConfig::is_dumping_aot_linked_classes()) {
2121 // We can have aot-initialized classes (such as Enums) that can reference objects
2122 // of arbitrary types. Currently, we trust the JEP 483 implementation to only
2123 // aot-initialize classes that are "safe".
2124 //
2125 // TODO: we need an automatic tool that checks the safety of aot-initialized
2126 // classes (when we extend the set of aot-initialized classes beyond JEP 483)
2409
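// Illustrative shape of a fields[] argument (hypothetical entries). The array
// is terminated by an entry whose klass_name is nullptr, making valid()
// return false:
//
//   static ArchivableStaticFieldInfo example_fields[] = {
//     {"java/lang/Integer$IntegerCache", "archivedCache"},
//     {nullptr, nullptr}, // terminator
//   };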
2410 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2411 bool is_full_module_graph) {
2412 _num_total_subgraph_recordings = 0;
2413 _num_total_walked_objs = 0;
2414 _num_total_archived_objs = 0;
2415 _num_total_recorded_klasses = 0;
2416 _num_total_verifications = 0;
2417
2418 // For each class X that has one or more archived fields:
2419 // [1] Dump the subgraph of each archived field
2420 // [2] Create a list of all the classes of the objects that can be reached
2421 // by any of these static fields.
2422 // At runtime, these classes are initialized before X's archived fields
2423 // are restored by HeapShared::initialize_from_archived_subgraph().
2424 for (int i = 0; fields[i].valid(); ) {
2425 ArchivableStaticFieldInfo* info = &fields[i];
2426 const char* klass_name = info->klass_name;
2427 start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2428
2429 ContextMark cm(klass_name);
2430 // If you have specified consecutive fields of the same klass in
2431 // fields[], these will be archived in the same
2432 // {start_recording_subgraph ... done_recording_subgraph} pass to
2433 // save time.
2434 for (; fields[i].valid(); i++) {
2435 ArchivableStaticFieldInfo* f = &fields[i];
2436 if (f->klass_name != klass_name) {
2437 break;
2438 }
2439
2440 ContextMark cm(f->field_name);
2441 archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2442 f->offset, f->field_name);
2443 }
2444 done_recording_subgraph(info->klass, klass_name);
2445 }
2446
2447 log_info(aot, heap)("Archived subgraph records = %zu",
2448 _num_total_subgraph_recordings);
2449 log_info(aot, heap)(" Walked %zu objects", _num_total_walked_objs);
2450 log_info(aot, heap)(" Archived %zu objects", _num_total_archived_objs);
2451 log_info(aot, heap)(" Recorded %zu klasses", _num_total_recorded_klasses);
2452
2453 #ifndef PRODUCT
2454 for (int i = 0; fields[i].valid(); i++) {
2455 ArchivableStaticFieldInfo* f = &fields[i];
2456 verify_subgraph_from_static_field(f->klass, f->offset);
2457 }
2458 log_info(aot, heap)(" Verified %zu references", _num_total_verifications);
2459 #endif
2460 }
2461
2462 bool HeapShared::is_interned_string(oop obj) {
2463 if (!java_lang_String::is_instance(obj)) {
2464 return false;
2465 }
2466
2467 ResourceMark rm;
2468 int len = 0;
2469 jchar* name = java_lang_String::as_unicode_string_or_null(obj, len);
2470 if (name == nullptr) {
2471 fatal("Insufficient memory for dumping");
2472 }
2473 return StringTable::lookup(name, len) == obj;
2474 }
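// Note the identity comparison: a String that merely equals a table entry by
// value is not considered interned -- only the canonical StringTable entry
// itself passes.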
2475
2476 bool HeapShared::is_dumped_interned_string(oop o) {
2477 return is_interned_string(o) && has_been_archived(o);
2478 }
2479
2480 // These tables should be used only within the CDS safepoint, so
2481 // delete them before we exit the safepoint. Otherwise the tables will
2482 // contain bad oops after a GC.
2483 void HeapShared::delete_tables_with_raw_oops() {
2484 assert(_seen_objects_table == nullptr, "should have been deleted");
2485
2486 if (is_writing_mapping_mode()) {
2487 AOTMappedHeapWriter::delete_tables_with_raw_oops();
2488 } else {
2489 assert(is_writing_streaming_mode(), "what other mode?");
2490 AOTStreamedHeapWriter::delete_tables_with_raw_oops();
2491 }
2492 }
2493
2494 void HeapShared::debug_trace() {
2495 ResourceMark rm;
2496 oop referrer = _object_being_archived.referrer();
2497 if (referrer != nullptr) {