12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotArtifactFinder.hpp"
26 #include "cds/aotClassInitializer.hpp"
27 #include "cds/aotClassLocation.hpp"
28 #include "cds/archiveBuilder.hpp"
29 #include "cds/archiveHeapLoader.hpp"
30 #include "cds/archiveHeapWriter.hpp"
31 #include "cds/archiveUtils.hpp"
32 #include "cds/cdsConfig.hpp"
33 #include "cds/cdsEnumKlass.hpp"
34 #include "cds/cdsHeapVerifier.hpp"
35 #include "cds/heapShared.hpp"
36 #include "cds/metaspaceShared.hpp"
37 #include "classfile/classLoaderData.hpp"
38 #include "classfile/classLoaderExt.hpp"
39 #include "classfile/javaClasses.inline.hpp"
40 #include "classfile/modules.hpp"
41 #include "classfile/stringTable.hpp"
42 #include "classfile/symbolTable.hpp"
43 #include "classfile/systemDictionary.hpp"
44 #include "classfile/systemDictionaryShared.hpp"
45 #include "classfile/vmClasses.hpp"
46 #include "classfile/vmSymbols.hpp"
47 #include "gc/shared/collectedHeap.hpp"
48 #include "gc/shared/gcLocker.hpp"
49 #include "gc/shared/gcVMOperations.hpp"
50 #include "logging/log.hpp"
51 #include "logging/logStream.hpp"
70 #include "gc/g1/g1CollectedHeap.hpp"
71 #endif
72
73 #if INCLUDE_CDS_JAVA_HEAP
74
// Describes a static field that serves as the root of an archived object
// subgraph (see the archive_subgraph_entry_fields tables below).
// Only klass_name/field_name are supplied at construction; klass, offset and
// type are filled in later during dumping.
struct ArchivableStaticFieldInfo {
  const char* klass_name;   // class declaring the field, in slash-separated form
  const char* field_name;   // name of the static field
  InstanceKlass* klass;     // resolved klass; nullptr until resolved
  int offset;               // static field offset; 0 until resolved
  BasicType type;           // basic type of the field; T_ILLEGAL until resolved

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  // A {nullptr, nullptr} entry marks the end of a table.
  bool valid() {
    return klass_name != nullptr;
  }
};
89
// Interned strings collected for the shared string table (dump time).
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

// Statistics about archived-object allocations.
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
// Support for the -XX:ArchiveHeapTestClass testing flag (non-product only).
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif
104
105
//
// If you add new entries to the following tables, you should know what you're doing!
//

// Static fields whose referenced object subgraphs are archived.
// Each table is terminated by a {nullptr, nullptr} entry.
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  {"java/lang/Long$LongCache", "archivedCache"},
  {"java/lang/Byte$ByteCache", "archivedCache"},
  {"java/lang/Short$ShortCache", "archivedCache"},
  {"java/lang/Character$CharacterCache", "archivedCache"},
  {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale", "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
  {"java/util/ImmutableCollections", "archivedObjects"},
  {"java/lang/ModuleLayer", "EMPTY_LAYER"},
  {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger", "archivedCaches"},

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};
129
// Additional subgraph roots, used only when the full module graph is archived.
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData", "archivedData"},
  {nullptr, nullptr},
};
137
// The "special" subgraph that doesn't belong to any particular class
// (see start_scanning_for_oops()), in dump-time and run-time forms.
KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
// Roots collected at dump time, later written out in segments (_root_segments).
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
145
146 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
147 for (int i = 0; fields[i].valid(); i++) {
148 if (fields[i].klass == ik) {
149 return true;
150 }
151 }
152 return false;
153 }
154
// Returns true if ik declares an entry in either subgraph-root table above.
bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
}
159
160 unsigned HeapShared::oop_hash(oop const& p) {
208 CHECK);
209 Handle boot_loader(THREAD, result.get_oop());
210 reset_states(boot_loader(), CHECK);
211 }
212
// Dump-time cache mapping original heap objects to their archiving info.
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;

// Returns true iff obj is present in the archived-object cache. Dump time only.
bool HeapShared::has_been_archived(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  return archived_object_cache()->get(obj) != nullptr;
}
219
// Appends an (already archived, or null) object to the pending root list and
// returns its index. Must run in the VM thread, at dump time only.
int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (obj != nullptr) {
    assert(has_been_archived(obj), "must be");
  }
  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  return _pending_roots->append(obj);
}
230
231 objArrayOop HeapShared::root_segment(int segment_idx) {
232 if (CDSConfig::is_dumping_heap()) {
233 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
234 } else {
235 assert(CDSConfig::is_using_archive(), "must be");
236 }
237
238 objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
239 assert(segment != nullptr, "should have been initialized");
240 return segment;
241 }
242
243 void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
244 assert(_root_segment_max_size_elems > 0, "sanity");
245
246 // Try to avoid divisions for the common case.
247 if (idx < _root_segment_max_size_elems) {
248 seg_idx = 0;
249 int_idx = idx;
250 } else {
251 seg_idx = idx / _root_segment_max_size_elems;
252 int_idx = idx % _root_segment_max_size_elems;
253 }
254
255 assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
256 "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
257 }
258
259 // Returns an objArray that contains all the roots of the archived objects
260 oop HeapShared::get_root(int index, bool clear) {
261 assert(index >= 0, "sanity");
262 assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
387 OopHandle* handle = get(ptr);
388 if (handle != nullptr) {
389 handle->release(Universe::vm_global());
390 remove(ptr);
391 }
392 }
393 };
394
// Records the scratch resolved_references array for src's pool holder, but
// only for classes loaded by a builtin loader.
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
    _scratch_objects_table->set_oop(src, dest);
  }
}

// Returns the scratch resolved_references array recorded for src, or nullptr.
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_objects_table->get_oop(src);
}
404
// One-time allocation of the dump-time data structures.
void HeapShared::init_dumping() {
  _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
}
409
410 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
411 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
412 BasicType bt = (BasicType)i;
413 if (!is_reference_type(bt)) {
414 oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
415 _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
416 }
417 }
418 }
419
// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that java_mirror itself will be returned if it's already a
// scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {
  assert(java_lang_Class::is_instance(java_mirror), "must be");

  // Already one of the scratch basic-type mirrors? Return it unchanged.
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
        return java_mirror;
      }
    }
  }

  // Otherwise dispatch on whether the mirror is for a primitive type or a Klass.
  if (java_lang_Class::is_primitive(java_mirror)) {
    return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
  } else {
    return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
  }
}
444
// Scratch mirror for the primitive type t.
oop HeapShared::scratch_java_mirror(BasicType t) {
  assert((uint)t < T_VOID+1, "range check");
  assert(!is_reference_type(t), "sanity");
  return _scratch_basic_type_mirrors[t].resolve();
}

// Scratch mirror recorded for klass k, or nullptr if none.
oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_objects_table->get_oop(k);
}

// Records the scratch mirror for klass k.
void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  _scratch_objects_table->set_oop(k, mirror);
}
458
// Called when Klass k is being deallocated. Its scratch mirror can still be
// alive and should not point to a dead klass, so break the mirror->Klass link
// (see how InstanceKlass::deallocate_contents does it for normal mirrors),
// then drop k — and, for instance klasses, its constant pool — from the
// scratch-objects table.
void HeapShared::remove_scratch_objects(Klass* k) {
  oop mirror = _scratch_objects_table->get_oop(k);
  if (mirror != nullptr) {
    java_lang_Class::set_klass(mirror, nullptr);
  }
  _scratch_objects_table->remove_oop(k);
  if (k->is_instance_klass()) {
    _scratch_objects_table->remove(InstanceKlass::cast(k)->constants());
  }
}
472
473 //TODO: we eventually want a more direct test for these kinds of things.
474 //For example the JVM could record some bit of context from the creation
475 //of the klass, such as who called the hidden class factory. Using
476 //string compares on names is fragile and will break as soon as somebody
477 //changes the names in the JDK code. See discussion in JDK-8342481 for
478 //related ideas about marking AOT-related classes.
479 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
480 return ik->is_hidden() &&
481 (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
482 ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
483 ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
484 ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
485 }
486
// A hidden class whose name contains "$$Lambda+" (not at position 0) is
// treated as a lambda proxy class.
bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
  return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
}
490
634 assert(info != nullptr, "must be");
635 has_oop_pointers = info->has_oop_pointers();
636 has_native_pointers = info->has_native_pointers();
637 }
638
// Marks the cached info for src_obj (which must already be in the
// archived-object cache) as containing native pointers.
void HeapShared::set_has_native_pointers(oop src_obj) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}
644
// Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
// should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
void HeapShared::start_scanning_for_oops() {
  {
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (UseCompressedOops || UseG1GC) {
      // Log the heap range: the compressed-oops range when in use,
      // otherwise G1's reserved range.
      log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }

    archive_subgraphs();
  }

  init_seen_objects_table();
  Universe::archive_exception_instances();
}
672
// Counterpart of start_scanning_for_oops(): archives the interned strings and
// tears down the seen-objects table.
void HeapShared::end_scanning_for_oops() {
  archive_strings();
  delete_seen_objects_table();
}
677
// Verifies the scanned objects and writes them — together with the shared
// string table and the subgraph info table — into heap_info.
void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
  {
    NoSafepointVerifier nsv;
    CDSHeapVerifier::verify();
    check_special_subgraph_classes();
  }

  StringTable::write_shared_table(_dumped_interned_strings);
  ArchiveHeapWriter::write(_pending_roots, heap_info);

  // The subgraph info table is allocated in the RO region.
  ArchiveBuilder::OtherROAllocMark mark;
  write_subgraph_info_table();
}
691
// Archives the scratch copy of orig_mirror (and everything reachable from it)
// into the special subgraph.
void HeapShared::scan_java_mirror(oop orig_mirror) {
  oop m = scratch_java_mirror(orig_mirror);
  if (m != nullptr) { // nullptr for mirrors of classes from custom class loaders
    copy_java_mirror_hashcode(orig_mirror, m);
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
    assert(success, "sanity");
  }
}
700
701 void HeapShared::scan_java_class(Klass* orig_k) {
702 scan_java_mirror(orig_k->java_mirror());
703
704 if (orig_k->is_instance_klass()) {
705 InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
706 orig_ik->constants()->prepare_resolved_references_for_archiving();
1074 which, k->external_name());
1075 FlagSetting fs1(VerifyBeforeGC, true);
1076 FlagSetting fs2(VerifyDuringGC, true);
1077 FlagSetting fs3(VerifyAfterGC, true);
1078 Universe::heap()->collect(GCCause::_java_lang_system_gc);
1079 }
1080 }
1081 }
1082
// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }
  // Resolve the classes of both subgraph-root tables.
  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
1097
1098 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1099 for (int i = 0; fields[i].valid(); i++) {
1100 ArchivableStaticFieldInfo* info = &fields[i];
1101 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1102 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1103 assert(k != nullptr && k->is_shared_boot_class(), "sanity");
1104 resolve_classes_for_subgraph_of(current, k);
1105 }
1106 }
1107
1108 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1109 JavaThread* THREAD = current;
1110 ExceptionMark em(THREAD);
1111 const ArchivedKlassSubGraphInfoRecord* record =
1112 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1113 if (HAS_PENDING_EXCEPTION) {
// Builds the CachedOopInfo for obj: records its referrer and the result of
// iterating obj's fields with PointsToOopsChecker.
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(referrer, points_to_oops_checker.result());
}
1447
1448 void HeapShared::init_box_classes(TRAPS) {
1449 if (ArchiveHeapLoader::is_in_use()) {
1450 vmClasses::Boolean_klass()->initialize(CHECK);
1451 vmClasses::Character_klass()->initialize(CHECK);
1452 vmClasses::Float_klass()->initialize(CHECK);
1453 vmClasses::Double_klass()->initialize(CHECK);
1454 vmClasses::Byte_klass()->initialize(CHECK);
1455 vmClasses::Short_klass()->initialize(CHECK);
1456 vmClasses::Integer_klass()->initialize(CHECK);
1457 vmClasses::Long_klass()->initialize(CHECK);
1458 vmClasses::Void_klass()->initialize(CHECK);
1459 }
1460 }
1461
// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all objects that are reachable from orig_obj (including those that
//     were already archived when this function is called)
//
// Returns false iff a level-1 (subgraph root) object could not be archived;
// deeper failures abort the VM inside walk_one_object().
bool HeapShared::archive_reachable_objects_from(int level,
                                                KlassSubGraphInfo* subgraph_info,
                                                oop orig_obj) {
  assert(orig_obj != nullptr, "must be");
  // Iterative (explicit-stack) traversal: walk_one_object() pushes the
  // referents of each visited object back onto the stack.
  PendingOopStack stack;
  stack.push(PendingOop(orig_obj, nullptr, level));

  while (stack.length() > 0) {
    PendingOop po = stack.pop();
    _object_being_archived = po;
    bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
    _object_being_archived = PendingOop();

    if (!status) {
      // Don't archive a subgraph root that's too big. For archived static fields, that's OK
      // as the Java code will take care of initializing this field dynamically.
      assert(level == 1, "VM should have exited with unarchivable objects for _level > 1");
      return false;
    }
  }

  return true;
}
1490
1491 bool HeapShared::walk_one_object(PendingOopStack* stack, int level, KlassSubGraphInfo* subgraph_info,
1492 oop orig_obj, oop referrer) {
1493 assert(orig_obj != nullptr, "must be");
1494 if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
1495 // This object has injected fields that cannot be supported easily, so we disallow them for now.
1496 // If you get an error here, you probably made a change in the JDK library that has added
1497 // these objects that are referenced (directly or indirectly) by static fields.
1498 ResourceMark rm;
1499 log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
1500 debug_trace();
1501 MetaspaceShared::unrecoverable_writing_error();
1502 }
1503
1504 if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
1505 ResourceMark rm;
1506 LogTarget(Debug, cds, heap) log;
1507 LogStream out(log);
1508 out.print("Found java mirror " PTR_FORMAT " ", p2i(orig_obj));
1509 Klass* k = java_lang_Class::as_Klass(orig_obj);
1510 if (k != nullptr) {
1511 out.print("%s", k->external_name());
1512 } else {
1513 out.print("primitive");
1514 }
1515 out.print_cr("; scratch mirror = " PTR_FORMAT,
1516 p2i(scratch_java_mirror(orig_obj)));
1517 }
1518
1519 if (CDSConfig::is_initing_classes_at_dump_time()) {
1520 if (java_lang_Class::is_instance(orig_obj)) {
1521 orig_obj = scratch_java_mirror(orig_obj);
1557
1558 bool already_archived = has_been_archived(orig_obj);
1559 bool record_klasses_only = already_archived;
1560 if (!already_archived) {
1561 ++_num_new_archived_objs;
1562 if (!archive_object(orig_obj, referrer, subgraph_info)) {
1563 // Skip archiving the sub-graph referenced from the current entry field.
1564 ResourceMark rm;
1565 log_error(cds, heap)(
1566 "Cannot archive the sub-graph referenced from %s object ("
1567 PTR_FORMAT ") size %zu, skipped.",
1568 orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1569 if (level == 1) {
1570 // Don't archive a subgraph root that's too big. For archives static fields, that's OK
1571 // as the Java code will take care of initializing this field dynamically.
1572 return false;
1573 } else {
1574 // We don't know how to handle an object that has been archived, but some of its reachable
1575 // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1576 // we have a real use case.
1577 MetaspaceShared::unrecoverable_writing_error();
1578 }
1579 }
1580 }
1581
1582 Klass *orig_k = orig_obj->klass();
1583 subgraph_info->add_subgraph_object_klass(orig_k);
1584
1585 {
1586 // Find all the oops that are referenced by orig_obj, push them onto the stack
1587 // so we can work on them next.
1588 ResourceMark rm;
1589 ReferentPusher pusher(stack, level, record_klasses_only, subgraph_info, orig_obj);
1590 orig_obj->oop_iterate(&pusher);
1591 }
1592
1593 if (CDSConfig::is_initing_classes_at_dump_time()) {
1594 // The enum klasses are archived with aot-initialized mirror.
1595 // See AOTClassInitializer::can_archive_initialized_mirror().
1596 } else {
1597 if (CDSEnumKlass::is_enum_obj(orig_obj)) {
2011
// Archives the object subgraph rooted at every entry of fields[], collecting
// and logging traversal statistics. In non-product builds each subgraph is
// verified afterwards.
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records = %d",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  // Re-walk each field's subgraph to verify what was recorded.
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}
|
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotArtifactFinder.hpp"
26 #include "cds/aotClassInitializer.hpp"
27 #include "cds/aotClassLocation.hpp"
28 #include "cds/archiveBuilder.hpp"
29 #include "cds/archiveHeapLoader.hpp"
30 #include "cds/archiveHeapWriter.hpp"
31 #include "cds/archiveUtils.hpp"
32 #include "cds/cdsAccess.hpp"
33 #include "cds/cdsConfig.hpp"
34 #include "cds/cdsEnumKlass.hpp"
35 #include "cds/cdsHeapVerifier.hpp"
36 #include "cds/heapShared.hpp"
37 #include "cds/metaspaceShared.hpp"
38 #include "classfile/classLoaderData.hpp"
39 #include "classfile/classLoaderExt.hpp"
40 #include "classfile/javaClasses.inline.hpp"
41 #include "classfile/modules.hpp"
42 #include "classfile/stringTable.hpp"
43 #include "classfile/symbolTable.hpp"
44 #include "classfile/systemDictionary.hpp"
45 #include "classfile/systemDictionaryShared.hpp"
46 #include "classfile/vmClasses.hpp"
47 #include "classfile/vmSymbols.hpp"
48 #include "gc/shared/collectedHeap.hpp"
49 #include "gc/shared/gcLocker.hpp"
50 #include "gc/shared/gcVMOperations.hpp"
51 #include "logging/log.hpp"
52 #include "logging/logStream.hpp"
71 #include "gc/g1/g1CollectedHeap.hpp"
72 #endif
73
74 #if INCLUDE_CDS_JAVA_HEAP
75
// Describes a static field that serves as the root of an archived object
// subgraph. Only klass_name/field_name are supplied at construction; klass,
// offset and type are filled in later during dumping.
struct ArchivableStaticFieldInfo {
  const char* klass_name;   // class declaring the field, in slash-separated form
  const char* field_name;   // name of the static field
  InstanceKlass* klass;     // resolved klass; nullptr until resolved
  int offset;               // static field offset; 0 until resolved
  BasicType type;           // basic type of the field; T_ILLEGAL until resolved

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  // A {nullptr, nullptr} entry marks the end of a table.
  bool valid() {
    return klass_name != nullptr;
  }
};
90
// RAII helper: pushes a context string onto HeapShared::_context on entry and
// pops it on exit. The embedded ResourceMark is set up before the push and
// released after the pop, bracketing any resource-area allocation done while
// the context is active.
class HeapShared::ContextMark : public StackObj {
  ResourceMark rm;
public:
  ContextMark(const char* c) : rm{} {
    _context->push(c);
  }
  ~ContextMark() {
    _context->pop();
  }
};
101
// Interned strings collected for the shared string table (dump time).
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

// Statistics about archived-object allocations.
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
// Support for the -XX:ArchiveHeapTestClass testing flag (non-product only).
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif
116
117
//
// If you add new entries to the following tables, you should know what you're doing!
//

// Static fields whose referenced object subgraphs are archived.
// Each table is terminated by a {nullptr, nullptr} entry.
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  {"java/lang/Long$LongCache", "archivedCache"},
  {"java/lang/Byte$ByteCache", "archivedCache"},
  {"java/lang/Short$ShortCache", "archivedCache"},
  {"java/lang/Character$CharacterCache", "archivedCache"},
  {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale", "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
  {"java/util/ImmutableCollections", "archivedObjects"},
  {"java/lang/ModuleLayer", "EMPTY_LAYER"},
  {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger", "archivedCaches"},
  {"java/lang/reflect/Proxy$ProxyBuilder", "archivedData"}, // FIXME -- requires AOTClassLinking

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};
142
// Additional subgraph roots, used only when the full module graph is archived.
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData", "archivedData"},
  {nullptr, nullptr},
};
150
// The "special" subgraph that doesn't belong to any particular class,
// in dump-time and run-time forms.
KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
// Roots are held via OopHandles here so they survive GC while pending.
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
// Stack of context strings maintained by HeapShared::ContextMark.
GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
159
160 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
161 for (int i = 0; fields[i].valid(); i++) {
162 if (fields[i].klass == ik) {
163 return true;
164 }
165 }
166 return false;
167 }
168
// Returns true if ik declares an entry in either subgraph-root table above.
bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
}
173
174 unsigned HeapShared::oop_hash(oop const& p) {
222 CHECK);
223 Handle boot_loader(THREAD, result.get_oop());
224 reset_states(boot_loader(), CHECK);
225 }
226
// Dump-time cache mapping original heap objects to their archiving info.
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;

// Returns true iff obj is present in the archived-object cache. Dump time only.
bool HeapShared::has_been_archived(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  return archived_object_cache()->get(obj) != nullptr;
}
233
// Wraps obj in a global OopHandle (keeping it alive across GCs) and appends it
// to the pending root list; returns its index. VM thread, dump time only.
int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (obj != nullptr) {
    assert(has_been_archived(obj), "must be");
  }
  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  OopHandle oh(Universe::vm_global(), obj);
  return _pending_roots->append(oh);
}
245
// Returns the segment_idx'th root segment, resolving its OopHandle.
// Note: during the final static archive dump we may be *using* one archive
// while dumping another, hence the extra is_dumping_final_static_archive() check.
objArrayOop HeapShared::root_segment(int segment_idx) {
  if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  } else {
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}
257
258 inline unsigned int oop_handle_hash(const OopHandle& oh) {
259 oop o = oh.resolve();
260 if (o == nullptr) {
261 return 0;
262 } else {
263 return o->identity_hash();
264 }
265 }
266
267 inline bool oop_handle_equals(const OopHandle& a, const OopHandle& b) {
268 return a.resolve() == b.resolve();
269 }
270
// Maps an original heap object to its scratch copy. Keys and values are
// OopHandles; hashing and equality resolve the handles (see oop_handle_hash /
// oop_handle_equals above).
class OrigToScratchObjectTable: public ResourceHashtable<OopHandle, OopHandle,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

// Lazily created in track_scratch_object(); guarded by ArchivedObjectTables_lock.
static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;
279
// Records the orig_obj -> scratch_obj mapping, creating the table on first
// use. Both oops are wrapped in global OopHandles so they survive GC.
void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
  if (_orig_to_scratch_object_table == nullptr) {
    _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
  }

  OopHandle orig_h(Universe::vm_global(), orig_obj);
  OopHandle scratch_h(Universe::vm_global(), scratch_obj);
  _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
}
290
291 oop HeapShared::orig_to_scratch_object(oop orig_obj) {
292 MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
293 if (_orig_to_scratch_object_table != nullptr) {
294 OopHandle orig(&orig_obj);
295 OopHandle* v = _orig_to_scratch_object_table->get(orig);
296 if (v != nullptr) {
297 return v->resolve();
298 }
299 }
300 return nullptr;
301 }
302
// Permanent oops are used to support AOT-compiled methods, which may have in-line references
// to Strings and MH oops.
//
// At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
// and are accessed via CDSAccess::get_archived_object(int).
struct PermanentOopInfo {
  int _index;       // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
  int _heap_offset; // Offset of the object from the bottom of the archived heap.
  PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
};
313
// Maps an archived heap object (via OopHandle) to its PermanentOopInfo.
class PermanentOopTable: public ResourceHashtable<OopHandle, PermanentOopInfo,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

// Number of permanent indexes handed out so far (dump time).
static int _dumptime_permanent_oop_count = 0;
static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
// Run-time list; entry i is the oop whose permanent index is i.
static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;
324
// ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
// so we can remember their offset (from the bottom of the archived heap).
void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
  assert_at_safepoint(); // table is mutated without a lock, so no concurrent mutators allowed
  if (_dumptime_permanent_oop_table == nullptr) {
    _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
  }

  // _index starts as -1; a real index is assigned lazily in
  // get_archived_object_permanent_index().
  PermanentOopInfo info(-1, offset);
  OopHandle oh(Universe::vm_global(), obj); // keep obj alive across GCs
  _dumptime_permanent_oop_table->put_when_absent(oh, info);
}
337
// A permanent index is assigned to an archived object ONLY when
// the AOT compiler calls this function.
// Returns -1 if obj is not an archived object, or if heap dumping is not in progress.
int HeapShared::get_archived_object_permanent_index(oop obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);

  if (!CDSConfig::is_dumping_heap()) {
    return -1; // Called by the Leyden old workflow
  }
  if (_dumptime_permanent_oop_table == nullptr) {
    return -1;
  }

  // If obj has a scratch copy, the scratch copy is the one that was recorded
  // in the permanent oop table -- look that one up instead.
  if (_orig_to_scratch_object_table != nullptr) {
    OopHandle orig(&obj);
    OopHandle* v = _orig_to_scratch_object_table->get(orig);
    if (v != nullptr) {
      obj = v->resolve();
    }
  }

  OopHandle tmp(&obj); // transient handle, used only for the lookup
  PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
  if (info == nullptr) {
    return -1;
  } else {
    // Assign the next available permanent index on first request.
    if (info->_index < 0) {
      info->_index = _dumptime_permanent_oop_count++;
    }
    return info->_index;
  }
}
369
// Runtime: return the archived heap object whose permanent index is permanent_index.
// The mapping was populated by CachedCodeDirectoryInternal::runtime_init_internal().
oop HeapShared::get_archived_object(int permanent_index) {
  assert(permanent_index >= 0, "sanity");
  assert(ArchiveHeapLoader::is_in_use(), "sanity");
  assert(_runtime_permanent_oops != nullptr, "sanity");

  return _runtime_permanent_oops->at(permanent_index).resolve();
}
377
378 // Remember all archived heap objects that have a permanent index.
379 // table[i] = offset of oop whose permanent index is i.
380 void CachedCodeDirectoryInternal::dumptime_init_internal() {
381 const int count = _dumptime_permanent_oop_count;
382 if (count == 0) {
383 // Avoid confusing CDS code with zero-sized tables, just return.
384 log_info(cds)("No permanent oops");
385 _permanent_oop_count = count;
386 _permanent_oop_offsets = nullptr;
387 return;
388 }
389
390 int* table = (int*)CDSAccess::allocate_from_code_cache(count * sizeof(int));
391 for (int i = 0; i < count; i++) {
392 table[count] = -1;
393 }
394 _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
395 int index = info._index;
396 if (index >= 0) {
397 assert(index < count, "sanity");
398 table[index] = info._heap_offset;
399 }
400 return true; // continue
401 });
402
403 for (int i = 0; i < count; i++) {
404 assert(table[i] >= 0, "must be");
405 }
406
407 log_info(cds)("Dumped %d permanent oops", count);
408
409 _permanent_oop_count = count;
410 CDSAccess::set_pointer(&_permanent_oop_offsets, table);
411 }
412
413 // This is called during the bootstrap of the production run, before any GC can happen.
414 // Record each permanent oop in a OopHandle for GC safety.
415 void CachedCodeDirectoryInternal::runtime_init_internal() {
416 int count = _permanent_oop_count;
417 int* table = _permanent_oop_offsets;
418 _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
419 for (int i = 0; i < count; i++) {
420 oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
421 OopHandle oh(Universe::vm_global(), obj);
422 _runtime_permanent_oops->append(oh);
423 }
424 };
425
// Map a flat root index idx into (seg_idx, int_idx): the root segment number and
// the position within that segment, given segments of _root_segment_max_size_elems
// elements each.
void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
  assert(_root_segment_max_size_elems > 0, "sanity");

  // Try to avoid divisions for the common case.
  if (idx < _root_segment_max_size_elems) {
    seg_idx = 0;
    int_idx = idx;
  } else {
    seg_idx = idx / _root_segment_max_size_elems;
    int_idx = idx % _root_segment_max_size_elems;
  }

  assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
         "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}
441
442 // Returns an objArray that contains all the roots of the archived objects
443 oop HeapShared::get_root(int index, bool clear) {
444 assert(index >= 0, "sanity");
445 assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
570 OopHandle* handle = get(ptr);
571 if (handle != nullptr) {
572 handle->release(Universe::vm_global());
573 remove(ptr);
574 }
575 }
576 };
577
578 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
579 if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
580 _scratch_objects_table->set_oop(src, dest);
581 }
582 }
583
// Return the scratch resolved_references array recorded for src by
// add_scratch_resolved_references(), or nullptr if none was recorded.
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_objects_table->get_oop(src);
}
587
// One-time setup of the dump-time tables: the scratch-object map and the
// list of pending heap roots.
void HeapShared::init_dumping() {
  _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
}
592
// Create a scratch mirror for each primitive (non-reference) basic type and
// record it in _scratch_basic_type_mirrors. Reference types are skipped; their
// scratch mirrors are handled per-Klass via set_scratch_java_mirror().
void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
      // Remember the orig->scratch association for this primitive mirror.
      track_scratch_object(Universe::java_mirror(bt), m);
    }
  }
}
603
// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that java_mirror will be returned as-is if it's already a
// scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {
  assert(java_lang_Class::is_instance(java_mirror), "must be");

  // If java_mirror is already one of the scratch basic-type mirrors, return it unchanged.
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
        return java_mirror;
      }
    }
  }

  // Otherwise dispatch on what the mirror represents.
  if (java_lang_Class::is_primitive(java_mirror)) {
    return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
  } else {
    return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
  }
}
628
// Return the scratch mirror for primitive type t (created in
// init_scratch_objects_for_basic_type_mirrors()).
oop HeapShared::scratch_java_mirror(BasicType t) {
  assert((uint)t < T_VOID+1, "range check");
  assert(!is_reference_type(t), "sanity");
  return _scratch_basic_type_mirrors[t].resolve();
}
634
// Return the scratch mirror recorded for k by set_scratch_java_mirror(),
// or nullptr if none was recorded.
oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_objects_table->get_oop(k);
}
638
// Record mirror as the scratch copy of k's java mirror.
void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  track_scratch_object(k->java_mirror(), mirror);
  _scratch_objects_table->set_oop(k, mirror);
}
643
644 void HeapShared::remove_scratch_objects(Klass* k) {
645 // Klass is being deallocated. Java mirror can still be alive, and it should not
646 // point to dead klass. We need to break the link from mirror to the Klass.
647 // See how InstanceKlass::deallocate_contents does it for normal mirrors.
648 oop mirror = _scratch_objects_table->get_oop(k);
649 if (mirror != nullptr) {
650 java_lang_Class::set_klass(mirror, nullptr);
651 }
652 _scratch_objects_table->remove_oop(k);
653 if (k->is_instance_klass()) {
654 _scratch_objects_table->remove(InstanceKlass::cast(k)->constants());
655 }
656 if (mirror != nullptr) {
657 OopHandle tmp(&mirror);
658 OopHandle* v = _orig_to_scratch_object_table->get(tmp);
659 if (v != nullptr) {
660 oop scratch_mirror = v->resolve();
661 java_lang_Class::set_klass(scratch_mirror, nullptr);
662 _orig_to_scratch_object_table->remove(tmp);
663 }
664 }
665 }
666
667 //TODO: we eventually want a more direct test for these kinds of things.
668 //For example the JVM could record some bit of context from the creation
669 //of the klass, such as who called the hidden class factory. Using
670 //string compares on names is fragile and will break as soon as somebody
671 //changes the names in the JDK code. See discussion in JDK-8342481 for
672 //related ideas about marking AOT-related classes.
673 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
674 return ik->is_hidden() &&
675 (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
676 ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
677 ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
678 ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
679 }
680
// A hidden class whose name contains "$$Lambda+" (after position 0) is a lambda proxy.
bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
  return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
}
684
828 assert(info != nullptr, "must be");
829 has_oop_pointers = info->has_oop_pointers();
830 has_native_pointers = info->has_native_pointers();
831 }
832
// Mark the dump-time cache entry for src_obj as containing native pointers.
// src_obj must already have been added to the archived object cache.
void HeapShared::set_has_native_pointers(oop src_obj) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}
838
// Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
// should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
void HeapShared::start_scanning_for_oops() {
  {
    NoSafepointVerifier nsv; // no safepoint (hence no GC) may occur during this setup

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
    _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    // Log the heap range for diagnostics (only available with compressed oops or G1).
    if (UseCompressedOops || UseG1GC) {
      log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }

    archive_subgraphs();
  }

  init_seen_objects_table();
  Universe::archive_exception_instances();
}
867
// Finish the scanning phase started by start_scanning_for_oops().
void HeapShared::end_scanning_for_oops() {
  archive_strings();
  delete_seen_objects_table();
}
872
// Verify the scanned object graph and write it (plus the shared string table
// and the subgraph info table) into heap_info.
void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
  {
    NoSafepointVerifier nsv; // keep the heap stable while verifying
    if (!SkipArchiveHeapVerification) {
      CDSHeapVerifier::verify();
    }
    check_special_subgraph_classes();
  }

  StringTable::write_shared_table(_dumped_interned_strings);
  // Resolve the pending root OopHandles into plain oops for the writer.
  GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
  for (int i = 0; i < _pending_roots->length(); i++) {
    roots->append(_pending_roots->at(i).resolve());
  }
  ArchiveHeapWriter::write(roots, heap_info);
  delete roots;

  ArchiveBuilder::OtherROAllocMark mark;
  write_subgraph_info_table();
}
893
// Archive the scratch mirror of orig_mirror, and everything reachable from it,
// into the special subgraph.
void HeapShared::scan_java_mirror(oop orig_mirror) {
  oop m = scratch_java_mirror(orig_mirror);
  if (m != nullptr) { // nullptr if for custom class loader
    copy_java_mirror_hashcode(orig_mirror, m);
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
    assert(success, "sanity");
  }
}
902
903 void HeapShared::scan_java_class(Klass* orig_k) {
904 scan_java_mirror(orig_k->java_mirror());
905
906 if (orig_k->is_instance_klass()) {
907 InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
908 orig_ik->constants()->prepare_resolved_references_for_archiving();
1276 which, k->external_name());
1277 FlagSetting fs1(VerifyBeforeGC, true);
1278 FlagSetting fs2(VerifyDuringGC, true);
1279 FlagSetting fs3(VerifyAfterGC, true);
1280 Universe::heap()->collect(GCCause::_java_lang_system_gc);
1281 }
1282 }
1283 }
1284
// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }

  if (!CDSConfig::is_using_aot_linked_classes()) {
    // Resolve (but do not initialize) every klass referenced by the special subgraph.
    assert( _run_time_special_subgraph != nullptr, "must be");
    Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
    if (klasses != nullptr) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        ExceptionMark em(current); // no exception can happen here
        resolve_or_init(k, /*do_init*/false, current);
      }
    }
  }

  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
1312
1313 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1314 for (int i = 0; fields[i].valid(); i++) {
1315 ArchivableStaticFieldInfo* info = &fields[i];
1316 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1317 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1318 assert(k != nullptr && k->is_shared_boot_class(), "sanity");
1319 resolve_classes_for_subgraph_of(current, k);
1320 }
1321 }
1322
1323 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1324 JavaThread* THREAD = current;
1325 ExceptionMark em(THREAD);
1326 const ArchivedKlassSubGraphInfoRecord* record =
1327 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1328 if (HAS_PENDING_EXCEPTION) {
// Build a CachedOopInfo for obj: record its referrer and whether obj
// contains references to other oops.
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(referrer, points_to_oops_checker.result());
}
1662
// Eagerly initialize all the primitive box classes. No-op unless an archived
// heap is in use.
void HeapShared::init_box_classes(TRAPS) {
  if (ArchiveHeapLoader::is_in_use()) {
    vmClasses::Boolean_klass()->initialize(CHECK);
    vmClasses::Character_klass()->initialize(CHECK);
    vmClasses::Float_klass()->initialize(CHECK);
    vmClasses::Double_klass()->initialize(CHECK);
    vmClasses::Byte_klass()->initialize(CHECK);
    vmClasses::Short_klass()->initialize(CHECK);
    vmClasses::Integer_klass()->initialize(CHECK);
    vmClasses::Long_klass()->initialize(CHECK);
    vmClasses::Void_klass()->initialize(CHECK);
  }
}
1676
// Dump-time fatal-error path: log the archiving context (which subgraph/field
// was being processed) and abort the dump. Does not return.
void HeapShared::exit_on_error() {
  if (_context != nullptr) {
    ResourceMark rm;
    LogStream ls(Log(cds, heap)::error());
    ls.print_cr("Context");
    for (int i = 0; i < _context->length(); i++) {
      const char* s = _context->at(i);
      ls.print_cr("- %s", s);
    }
  }
  debug_trace();
  MetaspaceShared::unrecoverable_writing_error();
}
1690
// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all objects that are reachable from orig_obj (including those that
//     were already archived when this function is called)
// Returns false only if a level-1 root cannot be archived; for deeper objects,
// walk_one_object() terminates the VM instead of returning false.
bool HeapShared::archive_reachable_objects_from(int level,
                                                KlassSubGraphInfo* subgraph_info,
                                                oop orig_obj) {
  assert(orig_obj != nullptr, "must be");
  // Traverse iteratively with an explicit worklist; walk_one_object() pushes
  // the referents of each object it processes.
  PendingOopStack stack;
  stack.push(PendingOop(orig_obj, nullptr, level));

  while (stack.length() > 0) {
    PendingOop po = stack.pop();
    _object_being_archived = po; // for diagnostics if archiving fails mid-walk
    bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
    _object_being_archived = PendingOop();

    if (!status) {
      // Don't archive a subgraph root that's too big. For archives static fields, that's OK
      // as the Java code will take care of initializing this field dynamically.
      assert(level == 1, "VM should have exited with unarchivable objects for _level > 1");
      return false;
    }
  }

  return true;
}
1719
1720 bool HeapShared::walk_one_object(PendingOopStack* stack, int level, KlassSubGraphInfo* subgraph_info,
1721 oop orig_obj, oop referrer) {
1722 assert(orig_obj != nullptr, "must be");
1723 if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
1724 // This object has injected fields that cannot be supported easily, so we disallow them for now.
1725 // If you get an error here, you probably made a change in the JDK library that has added
1726 // these objects that are referenced (directly or indirectly) by static fields.
1727 ResourceMark rm;
1728 log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
1729 exit_on_error();
1730 }
1731
1732 if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
1733 ResourceMark rm;
1734 LogTarget(Debug, cds, heap) log;
1735 LogStream out(log);
1736 out.print("Found java mirror " PTR_FORMAT " ", p2i(orig_obj));
1737 Klass* k = java_lang_Class::as_Klass(orig_obj);
1738 if (k != nullptr) {
1739 out.print("%s", k->external_name());
1740 } else {
1741 out.print("primitive");
1742 }
1743 out.print_cr("; scratch mirror = " PTR_FORMAT,
1744 p2i(scratch_java_mirror(orig_obj)));
1745 }
1746
1747 if (CDSConfig::is_initing_classes_at_dump_time()) {
1748 if (java_lang_Class::is_instance(orig_obj)) {
1749 orig_obj = scratch_java_mirror(orig_obj);
1785
1786 bool already_archived = has_been_archived(orig_obj);
1787 bool record_klasses_only = already_archived;
1788 if (!already_archived) {
1789 ++_num_new_archived_objs;
1790 if (!archive_object(orig_obj, referrer, subgraph_info)) {
1791 // Skip archiving the sub-graph referenced from the current entry field.
1792 ResourceMark rm;
1793 log_error(cds, heap)(
1794 "Cannot archive the sub-graph referenced from %s object ("
1795 PTR_FORMAT ") size %zu, skipped.",
1796 orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1797 if (level == 1) {
1798 // Don't archive a subgraph root that's too big. For archives static fields, that's OK
1799 // as the Java code will take care of initializing this field dynamically.
1800 return false;
1801 } else {
1802 // We don't know how to handle an object that has been archived, but some of its reachable
1803 // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1804 // we have a real use case.
1805 exit_on_error();
1806 }
1807 }
1808 }
1809
1810 Klass *orig_k = orig_obj->klass();
1811 subgraph_info->add_subgraph_object_klass(orig_k);
1812
1813 {
1814 // Find all the oops that are referenced by orig_obj, push them onto the stack
1815 // so we can work on them next.
1816 ResourceMark rm;
1817 ReferentPusher pusher(stack, level, record_klasses_only, subgraph_info, orig_obj);
1818 orig_obj->oop_iterate(&pusher);
1819 }
1820
1821 if (CDSConfig::is_initing_classes_at_dump_time()) {
1822 // The enum klasses are archived with aot-initialized mirror.
1823 // See AOTClassInitializer::can_archive_initialized_mirror().
1824 } else {
1825 if (CDSEnumKlass::is_enum_obj(orig_obj)) {
2239
// Archive the object subgraphs rooted at each static field listed in fields[]
// (terminated by an entry whose valid() is false), and log summary statistics.
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the class of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  for (int i = 0; fields[i].valid(); ) { // note: i is advanced by the inner loop
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    ContextMark cm(klass_name);
    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      // Pointer comparison; assumes entries for the same class reference the
      // same klass_name string (as set up in the fields[] tables).
      if (f->klass_name != klass_name) {
        break;
      }

      // Intentionally shadows the outer ContextMark, nesting "klass/field".
      ContextMark cm(f->field_name);
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records = %d",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)(" Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)(" Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)(" Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  // Verify that every archived subgraph is intact.
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)(" Verified %d references", _num_total_verifications);
#endif
}
|