6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotArtifactFinder.hpp"
26 #include "cds/aotCacheAccess.hpp"
27 #include "cds/aotClassInitializer.hpp"
28 #include "cds/aotClassLocation.hpp"
29 #include "cds/aotConstantPoolResolver.hpp"
30 #include "cds/aotLogging.hpp"
31 #include "cds/aotMetaspace.hpp"
32 #include "cds/aotOopChecker.hpp"
33 #include "cds/aotReferenceObjSupport.hpp"
34 #include "cds/archiveBuilder.hpp"
35 #include "cds/archiveHeapLoader.hpp"
36 #include "cds/archiveHeapWriter.hpp"
37 #include "cds/archiveUtils.hpp"
38 #include "cds/cdsConfig.hpp"
39 #include "cds/cdsEnumKlass.hpp"
40 #include "cds/cdsHeapVerifier.hpp"
41 #include "cds/heapShared.hpp"
42 #include "cds/regeneratedClasses.hpp"
43 #include "classfile/classLoaderData.hpp"
44 #include "classfile/javaClasses.inline.hpp"
45 #include "classfile/modules.hpp"
46 #include "classfile/stringTable.hpp"
47 #include "classfile/symbolTable.hpp"
48 #include "classfile/systemDictionary.hpp"
49 #include "classfile/systemDictionaryShared.hpp"
76 #include "gc/g1/g1CollectedHeap.hpp"
77 #endif
78
79 #if INCLUDE_CDS_JAVA_HEAP
80
81 struct ArchivableStaticFieldInfo {
82 const char* klass_name;
83 const char* field_name;
84 InstanceKlass* klass;
85 int offset;
86 BasicType type;
87
88 ArchivableStaticFieldInfo(const char* k, const char* f)
89 : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}
90
91 bool valid() {
92 return klass_name != nullptr;
93 }
94 };
95
96 class HeapShared::ContextMark : public StackObj {
97 ResourceMark rm;
98 public:
99 ContextMark(const char* c) : rm{} {
100 _context->push(c);
101 }
102 ~ContextMark() {
103 _context->pop();
104 }
105 };
106
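// Illustrative usage (a sketch; the real call sites are in archive_object_subgraphs()
// below): ContextMark is an RAII helper for the _context stack:
//
//   ContextMark cm(klass_name);  // pushes onto _context; popped when cm goes out of scope
//
// The accumulated context entries are printed by exit_on_error() if archiving fails.
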
107 DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
108
109 size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
110 size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
111 size_t HeapShared::_total_obj_count;
112 size_t HeapShared::_total_obj_size;
113
114 #ifndef PRODUCT
115 #define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
116 static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
117 static const char* _test_class_name = nullptr;
118 static Klass* _test_class = nullptr;
119 static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
120 #endif
121
122
123 //
124 // If you add new entries to the following tables, you should know what you're doing!
125 //
126
127 static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
128 {"java/lang/Integer$IntegerCache", "archivedCache"},
129 {"java/lang/Long$LongCache", "archivedCache"},
130 {"java/lang/Byte$ByteCache", "archivedCache"},
131 {"java/lang/Short$ShortCache", "archivedCache"},
132 {"java/lang/Character$CharacterCache", "archivedCache"},
133 {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
134 {"sun/util/locale/BaseLocale", "constantBaseLocales"},
135 {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
136 {"java/util/ImmutableCollections", "archivedObjects"},
137 {"java/lang/ModuleLayer", "EMPTY_LAYER"},
138 {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
139 {"jdk/internal/math/FDBigInteger", "archivedCaches"},
140 {"java/lang/reflect/Proxy$ProxyBuilder", "archivedData"}, // FIXME -- requires AOTClassLinking
141
142 #ifndef PRODUCT
143 {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
144 #endif
145 {nullptr, nullptr},
146 };
147
148 // full module graph
149 static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
150 {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
151 {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
152 {"java/lang/Module$ArchivedData", "archivedData"},
153 {nullptr, nullptr},
154 };
155
156 KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
157 ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
158 GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
159 GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
160 GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
161 int HeapShared::_root_segment_max_size_elems;
162 OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
163 MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
164
165 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
166 for (int i = 0; fields[i].valid(); i++) {
167 if (fields[i].klass == ik) {
168 return true;
169 }
170 }
171 return false;
172 }
173
174 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
175 return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
176 is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
177 }
178
179 oop HeapShared::CachedOopInfo::orig_referrer() const {
251 Handle boot_loader(THREAD, result.get_oop());
252 reset_states(boot_loader(), CHECK);
253 }
254
255 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
256
257 bool HeapShared::has_been_archived(oop obj) {
258 assert(CDSConfig::is_dumping_heap(), "dump-time only");
259 OopHandle oh(&obj);
260 return archived_object_cache()->get(oh) != nullptr;
261 }
262
263 int HeapShared::append_root(oop obj) {
264 assert(CDSConfig::is_dumping_heap(), "dump-time only");
265 if (obj != nullptr) {
266 assert(has_been_archived(obj), "must be");
267 }
268 // No GC should happen since we aren't scanning _pending_roots.
269 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
270
271 OopHandle oh(Universe::vm_global(), obj);
272 return _pending_roots->append(oh);
273 }
274
275 objArrayOop HeapShared::root_segment(int segment_idx) {
276 if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
277 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
278 } else {
279 assert(CDSConfig::is_using_archive(), "must be");
280 }
281
282 objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
283 assert(segment != nullptr, "should have been initialized");
284 return segment;
285 }
286
287 class OrigToScratchObjectTable: public HashTable<OopHandle, OopHandle,
288 36137, // prime number
289 AnyObj::C_HEAP,
290 mtClassShared,
291 HeapShared::oop_handle_hash,
292 HeapShared::oop_handle_equals> {};
293
294 static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;
295
296 void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
297 MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
298 if (_orig_to_scratch_object_table == nullptr) {
299 _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
300 }
301
302 OopHandle orig_h(Universe::vm_global(), orig_obj);
303 OopHandle scratch_h(Universe::vm_global(), scratch_obj);
304 _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
305 }
306
307 oop HeapShared::orig_to_scratch_object(oop orig_obj) {
308 MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
309 if (_orig_to_scratch_object_table != nullptr) {
310 OopHandle orig(&orig_obj);
311 OopHandle* v = _orig_to_scratch_object_table->get(orig);
312 if (v != nullptr) {
313 return v->resolve();
314 }
315 }
316 return nullptr;
317 }
318
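// Illustrative (a sketch, using names defined in this file): once a scratch
// mirror has been registered via set_scratch_java_mirror(k, m), the original
// mirror maps back to its scratch copy:
//
//   oop scratch = HeapShared::orig_to_scratch_object(k->java_mirror());  // == m
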
319 // Permanent oops are used to support AOT-compiled methods, which may have in-line references
320 // to Strings and MH oops.
321 //
322 // At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
323 // and are accessed via AOTCacheAccess::get_archived_object(int).
324 struct PermanentOopInfo {
325 int _index; // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
326 int _heap_offset; // Offset of the object from the bottom of the archived heap.
327 PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
328 };
329
330 class PermanentOopTable: public HashTable<OopHandle, PermanentOopInfo,
331 36137, // prime number
332 AnyObj::C_HEAP,
333 mtClassShared,
334 HeapShared::oop_handle_hash,
335 HeapShared::oop_handle_equals> {};
336
337 static int _dumptime_permanent_oop_count = 0;
338 static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
339 static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;
340
341 // ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
342 // so we can remember their offset (from the bottom of the archived heap).
343 void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
344 assert_at_safepoint();
345 if (_dumptime_permanent_oop_table == nullptr) {
346 _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
347 }
348
349 PermanentOopInfo info(-1, offset);
350 OopHandle oh(Universe::vm_global(), obj);
351 _dumptime_permanent_oop_table->put_when_absent(oh, info);
352 }
353
354 // A permanent index is assigned to an archived object ONLY when
355 // the AOT compiler calls this function.
356 int HeapShared::get_archived_object_permanent_index(oop obj) {
357 MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
358
359 if (!CDSConfig::is_dumping_heap()) {
360     return -1; // Called by the old Leyden workflow
361 }
362 if (_dumptime_permanent_oop_table == nullptr) {
363 return -1;
364 }
365
366 if (_orig_to_scratch_object_table != nullptr) {
367 OopHandle orig(&obj);
368 OopHandle* v = _orig_to_scratch_object_table->get(orig);
369 if (v != nullptr) {
370 obj = v->resolve();
371 }
372 }
373
374 OopHandle tmp(&obj);
375 PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
376 if (info == nullptr) {
377 return -1;
378 } else {
379 if (info->_index < 0) {
380 info->_index = _dumptime_permanent_oop_count++;
381 }
382 return info->_index;
383 }
384 }
385
386 oop HeapShared::get_archived_object(int permanent_index) {
387 assert(permanent_index >= 0, "sanity");
388 assert(ArchiveHeapLoader::is_in_use(), "sanity");
389 assert(_runtime_permanent_oops != nullptr, "sanity");
390
391 return _runtime_permanent_oops->at(permanent_index).resolve();
392 }
393
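// Illustrative round trip (a sketch; both calls are defined in this file, but
// the surrounding driver code is assumed):
//
//   int idx = HeapShared::get_archived_object_permanent_index(obj); // dump time; assigns an index on first call
//   ...
//   oop same = HeapShared::get_archived_object(idx);                // production run, after
//                                                                   // CachedCodeDirectoryInternal::runtime_init_internal()
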
394 // Remember all archived heap objects that have a permanent index.
395 // table[i] = offset of oop whose permanent index is i.
396 void CachedCodeDirectoryInternal::dumptime_init_internal() {
397 const int count = _dumptime_permanent_oop_count;
398 if (count == 0) {
399     // Avoid confusing CDS code with zero-sized tables; just return.
400 log_info(cds)("No permanent oops");
401 _permanent_oop_count = count;
402 _permanent_oop_offsets = nullptr;
403 return;
404 }
405
406 int* table = (int*)AOTCacheAccess::allocate_aot_code_region(count * sizeof(int));
407 for (int i = 0; i < count; i++) {
408     table[i] = -1; // initialize every slot to "unassigned"
409 }
410 _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
411 int index = info._index;
412 if (index >= 0) {
413 assert(index < count, "sanity");
414 table[index] = info._heap_offset;
415 }
416 return true; // continue
417 });
418
419 for (int i = 0; i < count; i++) {
420 assert(table[i] >= 0, "must be");
421 }
422
423 log_info(cds)("Dumped %d permanent oops", count);
424
425 _permanent_oop_count = count;
426 AOTCacheAccess::set_pointer(&_permanent_oop_offsets, table);
427 }
428
429 // This is called during the bootstrap of the production run, before any GC can happen.
430 // Record each permanent oop in an OopHandle for GC safety.
431 void CachedCodeDirectoryInternal::runtime_init_internal() {
432 int count = _permanent_oop_count;
433 int* table = _permanent_oop_offsets;
434 _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
435 for (int i = 0; i < count; i++) {
436 oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
437 OopHandle oh(Universe::vm_global(), obj);
438 _runtime_permanent_oops->append(oh);
439 }
440 }
441
442 void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
443 assert(_root_segment_max_size_elems > 0, "sanity");
444
445 // Try to avoid divisions for the common case.
446 if (idx < _root_segment_max_size_elems) {
447 seg_idx = 0;
448 int_idx = idx;
449 } else {
450 seg_idx = idx / _root_segment_max_size_elems;
451 int_idx = idx % _root_segment_max_size_elems;
452 }
453
454 assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
455 "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
456 }
457
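// Worked example (hypothetical segment size): with _root_segment_max_size_elems
// == 1000, the flat root index 2500 maps to segment 2, slot 500:
//
//   int seg_idx, int_idx;
//   HeapShared::get_segment_indexes(2500, seg_idx, int_idx);
//   // seg_idx == 2, int_idx == 500, since 2500 == 2 * 1000 + 500
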
458 // Returns the root of the archived objects at the given index
459 oop HeapShared::get_root(int index, bool clear) {
460 assert(index >= 0, "sanity");
461 assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
593 OopHandle* handle = get(ptr);
594 if (handle != nullptr) {
595 handle->release(Universe::vm_global());
596 remove(ptr);
597 }
598 }
599 };
600
601 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
602 if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
603 _scratch_objects_table->set_oop(src, dest);
604 }
605 }
606
607 objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
608 return (objArrayOop)_scratch_objects_table->get_oop(src);
609 }
610
611 void HeapShared::init_dumping() {
612 _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
613 _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
614 }
615
616 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
617 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
618 BasicType bt = (BasicType)i;
619 if (!is_reference_type(bt)) {
620 oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
621 _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
622 track_scratch_object(Universe::java_mirror(bt), m);
623 }
624 }
625 }
626
627 // Given java_mirror that represents a (primitive or reference) type T,
628 // return the "scratch" version that represents the same type T.
629 // Note that java_mirror itself will be returned if it's already a
630 // scratch mirror.
631 //
632 // See java_lang_Class::create_scratch_mirror() for more info.
633 oop HeapShared::scratch_java_mirror(oop java_mirror) {
634 assert(java_lang_Class::is_instance(java_mirror), "must be");
635
636 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
637 BasicType bt = (BasicType)i;
638 if (!is_reference_type(bt)) {
639 if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
640 return java_mirror;
641 }
642 }
643 }
644
645 if (java_lang_Class::is_primitive(java_mirror)) {
646 return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
647 } else {
648 return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
649 }
650 }
651
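// For example (illustrative only), both of these calls resolve to the same
// scratch mirror:
//
//   oop m1 = HeapShared::scratch_java_mirror(vmClasses::String_klass()->java_mirror());
//   oop m2 = HeapShared::scratch_java_mirror(vmClasses::String_klass());
//   assert(m1 == m2, "same scratch mirror for the same type");
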
652 oop HeapShared::scratch_java_mirror(BasicType t) {
653 assert((uint)t < T_VOID+1, "range check");
654 assert(!is_reference_type(t), "sanity");
655 return _scratch_basic_type_mirrors[t].resolve();
656 }
657
658 oop HeapShared::scratch_java_mirror(Klass* k) {
659 return _scratch_objects_table->get_oop(k);
660 }
661
662 void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
663 track_scratch_object(k->java_mirror(), mirror);
664 _scratch_objects_table->set_oop(k, mirror);
665 }
666
667 void HeapShared::remove_scratch_objects(Klass* k) {
668 // Klass is being deallocated. Java mirror can still be alive, and it should not
669 // point to dead klass. We need to break the link from mirror to the Klass.
670 // See how InstanceKlass::deallocate_contents does it for normal mirrors.
671 oop mirror = _scratch_objects_table->get_oop(k);
672 if (mirror != nullptr) {
673 java_lang_Class::set_klass(mirror, nullptr);
674 }
675 _scratch_objects_table->remove_oop(k);
676 if (k->is_instance_klass()) {
677 _scratch_objects_table->remove(InstanceKlass::cast(k)->constants());
678 }
679 if (mirror != nullptr) {
680 OopHandle tmp(&mirror);
681 OopHandle* v = _orig_to_scratch_object_table->get(tmp);
682 if (v != nullptr) {
683 oop scratch_mirror = v->resolve();
684 java_lang_Class::set_klass(scratch_mirror, nullptr);
685 _orig_to_scratch_object_table->remove(tmp);
686 }
687 }
688 }
689
690 //TODO: we eventually want a more direct test for these kinds of things.
691 //For example the JVM could record some bit of context from the creation
692 //of the klass, such as who called the hidden class factory. Using
693 //string compares on names is fragile and will break as soon as somebody
694 //changes the names in the JDK code. See discussion in JDK-8342481 for
695 //related ideas about marking AOT-related classes.
696 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
697 return ik->is_hidden() &&
698 (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
699 ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
700 ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
701 ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
702 }
703
704 bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
705 return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
706 }
707
722 if (RegeneratedClasses::is_regenerated_object(ik)) {
723 InstanceKlass* orig_ik = RegeneratedClasses::get_original_object(ik);
724 precond(orig_ik->is_initialized());
725 orig_mirror = orig_ik->java_mirror();
726 } else {
727 precond(ik->is_initialized());
728 orig_mirror = ik->java_mirror();
729 }
730
731 oop m = scratch_java_mirror(ik);
732 int nfields = 0;
733 for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
734 if (fs.access_flags().is_static()) {
735 fieldDescriptor& fd = fs.field_descriptor();
736 int offset = fd.offset();
737 switch (fd.field_type()) {
738 case T_OBJECT:
739 case T_ARRAY:
740 {
741 oop field_obj = orig_mirror->obj_field(offset);
742 m->obj_field_put(offset, field_obj);
743 if (field_obj != nullptr) {
744 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, field_obj);
745 assert(success, "sanity");
746 }
747 }
748 break;
749 case T_BOOLEAN:
750 m->bool_field_put(offset, orig_mirror->bool_field(offset));
751 break;
752 case T_BYTE:
753 m->byte_field_put(offset, orig_mirror->byte_field(offset));
754 break;
755 case T_SHORT:
756 m->short_field_put(offset, orig_mirror->short_field(offset));
757 break;
758 case T_CHAR:
759 m->char_field_put(offset, orig_mirror->char_field(offset));
760 break;
761 case T_INT:
796 // We need to retain the identity_hash, because it may have been used by some hashtables
797 // in the shared heap.
798 if (!orig_mirror->fast_no_hash_check()) {
799 intptr_t src_hash = orig_mirror->identity_hash();
800 if (UseCompactObjectHeaders) {
801 narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
802 scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
803 } else {
804 scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
805 }
806 assert(scratch_m->mark().is_unlocked(), "sanity");
807
808 DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
809 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
810 }
811
812 if (CDSConfig::is_dumping_aot_linked_classes()) {
813 java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
814 java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
815 }
816
817   Klass* k = java_lang_Class::as_Klass(orig_mirror); // null for primitive mirrors such as Universe::void_mirror()
818 if (CDSConfig::is_dumping_reflection_data() &&
819 k != nullptr && k->is_instance_klass() &&
820 java_lang_Class::reflection_data(orig_mirror) != nullptr &&
821 AOTConstantPoolResolver::can_archive_reflection_data(InstanceKlass::cast(k))) {
822 java_lang_Class::set_reflection_data(scratch_m, java_lang_Class::reflection_data(orig_mirror));
823 }
824 }
825
826 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
827 if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
828 objArrayOop rr = src_ik->constants()->resolved_references_or_null();
829 if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
830 return HeapShared::scratch_resolved_references(src_ik->constants());
831 }
832 }
833 return nullptr;
834 }
835
836 void HeapShared::archive_strings() {
837 oop shared_strings_array = StringTable::init_shared_strings_array();
838 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
839 assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
840 StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
841 }
842
843 int HeapShared::archive_exception_instance(oop exception) {
862 has_oop_pointers = info->has_oop_pointers();
863 has_native_pointers = info->has_native_pointers();
864 }
865
866 void HeapShared::set_has_native_pointers(oop src_obj) {
867 OopHandle oh(&src_obj);
868 CachedOopInfo* info = archived_object_cache()->get(oh);
869 assert(info != nullptr, "must be");
870 info->set_has_native_pointers();
871 }
872
873 // Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
874 // should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
875 void HeapShared::start_scanning_for_oops() {
876 {
877 NoSafepointVerifier nsv;
878
879 // The special subgraph doesn't belong to any class. We use Object_klass() here just
880 // for convenience.
881 _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
882 _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);
883
884 // Cache for recording where the archived objects are copied to
885 create_archived_object_cache();
886
887 if (UseCompressedOops || UseG1GC) {
888 aot_log_info(aot)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
889 UseCompressedOops ? p2i(CompressedOops::begin()) :
890 p2i((address)G1CollectedHeap::heap()->reserved().start()),
891 UseCompressedOops ? p2i(CompressedOops::end()) :
892 p2i((address)G1CollectedHeap::heap()->reserved().end()));
893 }
894
895 archive_subgraphs();
896 }
897
898 init_seen_objects_table();
899 Universe::archive_exception_instances();
900 }
901
902 void HeapShared::end_scanning_for_oops() {
903 archive_strings();
904 delete_seen_objects_table();
905 }
906
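// Illustrative dump-time sequence (a sketch; the actual driver is
// AOTArtifactFinder, as noted above):
//
//   HeapShared::start_scanning_for_oops();
//   // ... AOTArtifactFinder scans classes, calling scan_java_class(k), etc. ...
//   HeapShared::end_scanning_for_oops();
//   HeapShared::write_heap(&heap_info);  // later in the dump pipeline
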
907 void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
908 {
909 NoSafepointVerifier nsv;
910 if (!SkipArchiveHeapVerification) {
911 CDSHeapVerifier::verify();
912 }
913 check_special_subgraph_classes();
914 }
915
916 StringTable::write_shared_table();
917 GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
918 for (int i = 0; i < _pending_roots->length(); i++) {
919 roots->append(_pending_roots->at(i).resolve());
920 }
921 ArchiveHeapWriter::write(roots, heap_info);
922 delete roots;
923
924 ArchiveBuilder::OtherROAllocMark mark;
925 write_subgraph_info_table();
926 }
927
928 void HeapShared::scan_java_mirror(oop orig_mirror) {
929 oop m = scratch_java_mirror(orig_mirror);
930   if (m != nullptr) { // nullptr for classes of custom class loaders
931 copy_java_mirror(orig_mirror, m);
932 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
933 assert(success, "sanity");
934
935 oop extra;
936 if ((extra = java_lang_Class::reflection_data(m)) != nullptr) {
937 success = archive_reachable_objects_from(1, _dump_time_special_subgraph, extra);
938 assert(success, "sanity");
939 }
940 }
941 }
942
943 void HeapShared::scan_java_class(Klass* orig_k) {
944 scan_java_mirror(orig_k->java_mirror());
945
946 if (orig_k->is_instance_klass()) {
947 InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
948 orig_ik->constants()->prepare_resolved_references_for_archiving();
949 objArrayOop rr = get_archived_resolved_references(orig_ik);
950 if (rr != nullptr) {
951 bool success = HeapShared::archive_reachable_objects_from(1, _dump_time_special_subgraph, rr);
952 assert(success, "must be");
953 }
954 }
955 }
956
957 void HeapShared::archive_subgraphs() {
958 assert(CDSConfig::is_dumping_heap(), "must be");
959
1314 which, k->external_name());
1315 FlagSetting fs1(VerifyBeforeGC, true);
1316 FlagSetting fs2(VerifyDuringGC, true);
1317 FlagSetting fs3(VerifyAfterGC, true);
1318 Universe::heap()->collect(GCCause::_java_lang_system_gc);
1319 }
1320 }
1321 }
1322
1323 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
1324 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
1325 //
1326 // Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
1327 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
1328 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
1329 void HeapShared::resolve_classes(JavaThread* current) {
1330 assert(CDSConfig::is_using_archive(), "runtime only!");
1331 if (!ArchiveHeapLoader::is_in_use()) {
1332 return; // nothing to do
1333 }
1334
1335 if (!CDSConfig::is_using_aot_linked_classes()) {
1336     assert(_run_time_special_subgraph != nullptr, "must be");
1337 Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
1338 if (klasses != nullptr) {
1339 for (int i = 0; i < klasses->length(); i++) {
1340 Klass* k = klasses->at(i);
1341 ExceptionMark em(current); // no exception can happen here
1342 resolve_or_init(k, /*do_init*/false, current);
1343 }
1344 }
1345 }
1346
1347 resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
1348 resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
1349 }
1350
1351 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1352 for (int i = 0; fields[i].valid(); i++) {
1353 ArchivableStaticFieldInfo* info = &fields[i];
1354 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1355 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1356 assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
1357 resolve_classes_for_subgraph_of(current, k);
1358 }
1359 }
1360
1361 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1362 JavaThread* THREAD = current;
1363 ExceptionMark em(THREAD);
1364 const ArchivedKlassSubGraphInfoRecord* record =
1365 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1366 if (HAS_PENDING_EXCEPTION) {
1702 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
1703 PointsToOopsChecker points_to_oops_checker;
1704 obj->oop_iterate(&points_to_oops_checker);
1705 return CachedOopInfo(OopHandle(Universe::vm_global(), referrer), points_to_oops_checker.result());
1706 }
1707
1708 void HeapShared::init_box_classes(TRAPS) {
1709 if (ArchiveHeapLoader::is_in_use()) {
1710 vmClasses::Boolean_klass()->initialize(CHECK);
1711 vmClasses::Character_klass()->initialize(CHECK);
1712 vmClasses::Float_klass()->initialize(CHECK);
1713 vmClasses::Double_klass()->initialize(CHECK);
1714 vmClasses::Byte_klass()->initialize(CHECK);
1715 vmClasses::Short_klass()->initialize(CHECK);
1716 vmClasses::Integer_klass()->initialize(CHECK);
1717 vmClasses::Long_klass()->initialize(CHECK);
1718 vmClasses::Void_klass()->initialize(CHECK);
1719 }
1720 }
1721
1722 void HeapShared::exit_on_error() {
1723 if (_context != nullptr) {
1724 ResourceMark rm;
1725 LogStream ls(Log(cds, heap)::error());
1726 ls.print_cr("Context");
1727 for (int i = 0; i < _context->length(); i++) {
1728 const char* s = _context->at(i);
1729 ls.print_cr("- %s", s);
1730 }
1731 }
1732 debug_trace();
1733 AOTMetaspace::unrecoverable_writing_error();
1734 }
1735
1736 // (1) If orig_obj has not been archived yet, archive it.
1737 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
1738 // trace all objects that are reachable from it, and make sure these objects are archived.
1739 // (3) Record the klasses of all objects that are reachable from orig_obj (including those that
1740 // were already archived when this function is called)
1741 bool HeapShared::archive_reachable_objects_from(int level,
1742 KlassSubGraphInfo* subgraph_info,
1743 oop orig_obj) {
1744 assert(orig_obj != nullptr, "must be");
1745 PendingOopStack stack;
1746 stack.push(PendingOop(orig_obj, nullptr, level));
1747
1748 while (stack.length() > 0) {
1749 PendingOop po = stack.pop();
1750 _object_being_archived = po;
1751 bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
1752 _object_being_archived = PendingOop();
1753
1754 if (!status) {
1755       // Don't archive a subgraph root that's too big. For archived static fields, that's OK
1982 verify_subgraph_from(f);
1983 }
1984 }
1985
1986 void HeapShared::verify_subgraph_from(oop orig_obj) {
1987 if (!has_been_archived(orig_obj)) {
1988 // It's OK for the root of a subgraph to be not archived. See comments in
1989 // archive_reachable_objects_from().
1990 return;
1991 }
1992
1993 // Verify that all objects reachable from orig_obj are archived.
1994 init_seen_objects_table();
1995 verify_reachable_objects_from(orig_obj);
1996 delete_seen_objects_table();
1997 }
1998
1999 void HeapShared::verify_reachable_objects_from(oop obj) {
2000   _num_total_verifications++;
2001 if (java_lang_Class::is_instance(obj)) {
2002 Klass* k = java_lang_Class::as_Klass(obj);
2003 if (RegeneratedClasses::has_been_regenerated(k)) {
2004 k = RegeneratedClasses::get_regenerated_object(k);
2005 obj = k->java_mirror();
2006 }
2007 obj = scratch_java_mirror(obj);
2008 assert(obj != nullptr, "must be");
2009 }
2010 if (!has_been_seen_during_subgraph_recording(obj)) {
2011 set_has_been_seen_during_subgraph_recording(obj);
2012 assert(has_been_archived(obj), "must be");
2013 VerifySharedOopClosure walker;
2014 obj->oop_iterate(&walker);
2015 }
2016 }
2017 #endif
2018
2019 void HeapShared::check_special_subgraph_classes() {
2020 if (CDSConfig::is_initing_classes_at_dump_time()) {
2021 // We can have aot-initialized classes (such as Enums) that can reference objects
2022 // of arbitrary types. Currently, we trust the JEP 483 implementation to only
2023 // aot-initialize classes that are "safe".
2024 //
2025 // TODO: we need an automatic tool that checks the safety of aot-initialized
2026 // classes (when we extend the set of aot-initialized classes beyond JEP 483)
2299
2300 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2301 bool is_full_module_graph) {
2302 _num_total_subgraph_recordings = 0;
2303 _num_total_walked_objs = 0;
2304 _num_total_archived_objs = 0;
2305 _num_total_recorded_klasses = 0;
2306 _num_total_verifications = 0;
2307
2308 // For each class X that has one or more archived fields:
2309 // [1] Dump the subgraph of each archived field
2310 // [2] Create a list of all the classes of the objects that can be reached
2311 // by any of these static fields.
2312 // At runtime, these classes are initialized before X's archived fields
2313 // are restored by HeapShared::initialize_from_archived_subgraph().
2314 for (int i = 0; fields[i].valid(); ) {
2315 ArchivableStaticFieldInfo* info = &fields[i];
2316 const char* klass_name = info->klass_name;
2317 start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2318
2319 ContextMark cm(klass_name);
2320 // If you have specified consecutive fields of the same klass in
2321 // fields[], these will be archived in the same
2322 // {start_recording_subgraph ... done_recording_subgraph} pass to
2323 // save time.
2324 for (; fields[i].valid(); i++) {
2325 ArchivableStaticFieldInfo* f = &fields[i];
2326 if (f->klass_name != klass_name) {
2327 break;
2328 }
2329
2330 ContextMark cm(f->field_name);
2331 archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2332 f->offset, f->field_name);
2333 }
2334 done_recording_subgraph(info->klass, klass_name);
2335 }
2336
2337 log_info(aot, heap)("Archived subgraph records = %d",
2338 _num_total_subgraph_recordings);
2339 log_info(aot, heap)(" Walked %d objects", _num_total_walked_objs);
2340 log_info(aot, heap)(" Archived %d objects", _num_total_archived_objs);
2341 log_info(aot, heap)(" Recorded %d klasses", _num_total_recorded_klasses);
2342
2343 #ifndef PRODUCT
2344 for (int i = 0; fields[i].valid(); i++) {
2345 ArchivableStaticFieldInfo* f = &fields[i];
2346 verify_subgraph_from_static_field(f->klass, f->offset);
2347 }
2348 log_info(aot, heap)(" Verified %d references", _num_total_verifications);
2349 #endif
2350 }