 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotArtifactFinder.hpp"
#include "cds/aotCacheAccess.hpp"
#include "cds/aotClassInitializer.hpp"
#include "cds/aotClassLocation.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsEnumKlass.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
// ... (further includes elided) ...
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  bool valid() {
    return klass_name != nullptr;
  }
};

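// RAII helper: pushes a label onto _context while a subgraph is being archived,
// and pops it on scope exit. The labels are printed by exit_on_error() to show
// what was being archived when a failure occurred.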
class HeapShared::ContextMark : public StackObj {
  ResourceMark rm;
 public:
  ContextMark(const char* c) : rm{} {
    _context->push(c);
  }
  ~ContextMark() {
    _context->pop();
  }
};

DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif


//
// If you add new entries to the following tables, you should know what you're doing!
//

static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/Long$LongCache",                 "archivedCache"},
  {"java/lang/Byte$ByteCache",                 "archivedCache"},
  {"java/lang/Short$ShortCache",               "archivedCache"},
  {"java/lang/Character$CharacterCache",       "archivedCache"},
  {"java/util/jar/Attributes$Name",            "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",               "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleGraph"},
  {"java/util/ImmutableCollections",           "archivedObjects"},
  {"java/lang/ModuleLayer",                    "EMPTY_LAYER"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",           "archivedCaches"},
  {"java/lang/reflect/Proxy$ProxyBuilder",     "archivedData"}, // FIXME -- requires AOTClassLinking

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};

// full module graph
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS,                  ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData",            "archivedData"},
  {nullptr, nullptr},
};

KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;

static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
      return true;
    }
  }
  return false;
}

bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
}

unsigned HeapShared::oop_hash(oop const& p) {
  // ... (body of oop_hash() and several following functions elided) ...
                              CHECK);
  Handle boot_loader(THREAD, result.get_oop());
  reset_states(boot_loader(), CHECK);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;

bool HeapShared::has_been_archived(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  return archived_object_cache()->get(obj) != nullptr;
}

int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (obj != nullptr) {
    assert(has_been_archived(obj), "must be");
  }
  // The roots are stored as strong OopHandles in Universe::vm_global(), so they
  // are kept alive (and updated if objects move) by GC.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  OopHandle oh(Universe::vm_global(), obj);
  return _pending_roots->append(oh);
}

objArrayOop HeapShared::root_segment(int segment_idx) {
  if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  } else {
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}

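// The tables below are keyed by OopHandle. Hash and compare by the referenced
// objects (identity_hash is stable across GCs), not by the handle locations.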
inline unsigned int oop_handle_hash(const OopHandle& oh) {
  oop o = oh.resolve();
  if (o == nullptr) {
    return 0;
  } else {
    return o->identity_hash();
  }
}

inline bool oop_handle_equals(const OopHandle& a, const OopHandle& b) {
  return a.resolve() == b.resolve();
}

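// Maps an original heap object to the "scratch" copy that will be written into
// the archive in its place.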
class OrigToScratchObjectTable: public ResourceHashtable<OopHandle, OopHandle,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;

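// Record the scratch copy created for orig_obj, so the original object can be
// mapped to its archived version later (see get_archived_object_permanent_index()).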
void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
  if (_orig_to_scratch_object_table == nullptr) {
    _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
  }

  OopHandle orig_h(Universe::vm_global(), orig_obj);
  OopHandle scratch_h(Universe::vm_global(), scratch_obj);
  _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
}

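// Returns the scratch copy recorded for orig_obj, or nullptr if there is none.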
oop HeapShared::orig_to_scratch_object(oop orig_obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
  if (_orig_to_scratch_object_table != nullptr) {
    OopHandle orig(&orig_obj);
    OopHandle* v = _orig_to_scratch_object_table->get(orig);
    if (v != nullptr) {
      return v->resolve();
    }
  }
  return nullptr;
}

// Permanent oops are used to support AOT-compiled methods, which may have inline references
// to Strings and MH oops.
//
// At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
// and are accessed via AOTCacheAccess::get_archived_object(int).
struct PermanentOopInfo {
  int _index;       // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
  int _heap_offset; // Offset of the object from the bottom of the archived heap.
  PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
};

class PermanentOopTable: public ResourceHashtable<OopHandle, PermanentOopInfo,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

static int _dumptime_permanent_oop_count = 0;
static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;

// ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
// so we can remember their offset (from the bottom of the archived heap).
void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
  assert_at_safepoint();
  if (_dumptime_permanent_oop_table == nullptr) {
    _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
  }

  PermanentOopInfo info(-1, offset);
  OopHandle oh(Universe::vm_global(), obj);
  _dumptime_permanent_oop_table->put_when_absent(oh, info);
}

// A permanent index is assigned to an archived object ONLY when
// the AOT compiler calls this function.
int HeapShared::get_archived_object_permanent_index(oop obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);

  if (!CDSConfig::is_dumping_heap()) {
    return -1; // Called by the Leyden old workflow
  }
  if (_dumptime_permanent_oop_table == nullptr) {
    return -1;
  }

  if (_orig_to_scratch_object_table != nullptr) {
    OopHandle orig(&obj);
    OopHandle* v = _orig_to_scratch_object_table->get(orig);
    if (v != nullptr) {
      obj = v->resolve();
    }
  }

  OopHandle tmp(&obj);
  PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
  if (info == nullptr) {
    return -1;
  } else {
    if (info->_index < 0) {
      info->_index = _dumptime_permanent_oop_count++;
    }
    return info->_index;
  }
}

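// Runtime accessor: returns the archived object that was assigned permanent_index
// at dump time by get_archived_object_permanent_index().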
oop HeapShared::get_archived_object(int permanent_index) {
  assert(permanent_index >= 0, "sanity");
  assert(ArchiveHeapLoader::is_in_use(), "sanity");
  assert(_runtime_permanent_oops != nullptr, "sanity");

  return _runtime_permanent_oops->at(permanent_index).resolve();
}

// Remember all archived heap objects that have a permanent index.
//   table[i] = offset of oop whose permanent index is i.
void CachedCodeDirectoryInternal::dumptime_init_internal() {
  const int count = _dumptime_permanent_oop_count;
  if (count == 0) {
    // Avoid confusing CDS code with zero-sized tables, just return.
    log_info(cds)("No permanent oops");
    _permanent_oop_count = count;
    _permanent_oop_offsets = nullptr;
    return;
  }

  int* table = (int*)AOTCacheAccess::allocate_aot_code_region(count * sizeof(int));
  for (int i = 0; i < count; i++) {
    table[i] = -1;
  }
  _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
    int index = info._index;
    if (index >= 0) {
      assert(index < count, "sanity");
      table[index] = info._heap_offset;
    }
    return true; // continue
  });

  for (int i = 0; i < count; i++) {
    assert(table[i] >= 0, "must be");
  }

  log_info(cds)("Dumped %d permanent oops", count);

  _permanent_oop_count = count;
  AOTCacheAccess::set_pointer(&_permanent_oop_offsets, table);
}

// This is called during the bootstrap of the production run, before any GC can happen.
// Record each permanent oop in an OopHandle for GC safety.
void CachedCodeDirectoryInternal::runtime_init_internal() {
  int count = _permanent_oop_count;
  int* table = _permanent_oop_offsets;
  _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
  for (int i = 0; i < count; i++) {
    oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
    OopHandle oh(Universe::vm_global(), obj);
    _runtime_permanent_oops->append(oh);
  }
}

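// Maps a global root index to (segment index, index within segment). The roots
// are split across segments of at most _root_segment_max_size_elems elements;
// e.g., with a segment size of 1000, idx 2500 maps to seg_idx 2, int_idx 500.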
void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
  assert(_root_segment_max_size_elems > 0, "sanity");

  // Try to avoid divisions for the common case.
  if (idx < _root_segment_max_size_elems) {
    seg_idx = 0;
    int_idx = idx;
  } else {
    seg_idx = idx / _root_segment_max_size_elems;
    int_idx = idx % _root_segment_max_size_elems;
  }

  assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
         "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}

// Returns an objArray that contains all the roots of the archived objects
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
  // ... (rest of get_root() and several following functions elided) ...
    OopHandle* handle = get(ptr);
    if (handle != nullptr) {
      handle->release(Universe::vm_global());
      remove(ptr);
    }
  }
};

void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
    _scratch_objects_table->set_oop(src, dest);
  }
}

objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_objects_table->get_oop(src);
}

void HeapShared::init_dumping() {
  _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
}

void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
      track_scratch_object(Universe::java_mirror(bt), m);
    }
  }
}

// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that java_mirror itself will be returned if it's already a
// scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {
  assert(java_lang_Class::is_instance(java_mirror), "must be");

  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
        return java_mirror;
      }
    }
  }

  if (java_lang_Class::is_primitive(java_mirror)) {
    return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
  } else {
    return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
  }
}

oop HeapShared::scratch_java_mirror(BasicType t) {
  assert((uint)t < T_VOID+1, "range check");
  assert(!is_reference_type(t), "sanity");
  return _scratch_basic_type_mirrors[t].resolve();
}

oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_objects_table->get_oop(k);
}

void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  track_scratch_object(k->java_mirror(), mirror);
  _scratch_objects_table->set_oop(k, mirror);
}

void HeapShared::remove_scratch_objects(Klass* k) {
  // Klass is being deallocated. Java mirror can still be alive, and it should not
  // point to the dead klass. We need to break the link from mirror to the Klass.
  // See how InstanceKlass::deallocate_contents does it for normal mirrors.
  oop mirror = _scratch_objects_table->get_oop(k);
  if (mirror != nullptr) {
    java_lang_Class::set_klass(mirror, nullptr);
  }
  _scratch_objects_table->remove_oop(k);
  if (k->is_instance_klass()) {
    _scratch_objects_table->remove(InstanceKlass::cast(k)->constants());
  }
  if (mirror != nullptr) {
    OopHandle tmp(&mirror);
    OopHandle* v = _orig_to_scratch_object_table->get(tmp);
    if (v != nullptr) {
      oop scratch_mirror = v->resolve();
      java_lang_Class::set_klass(scratch_mirror, nullptr);
      _orig_to_scratch_object_table->remove(tmp);
    }
  }
}

// TODO: we eventually want a more direct test for these kinds of things.
// For example the JVM could record some bit of context from the creation
// of the klass, such as who called the hidden class factory. Using
// string compares on names is fragile and will break as soon as somebody
// changes the names in the JDK code. See discussion in JDK-8342481 for
// related ideas about marking AOT-related classes.
bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
  return ik->is_hidden() &&
    (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
     ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
     ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
     ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
}

bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
  return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
}

// ... (several functions elided) ...
  assert(info != nullptr, "must be");
  has_oop_pointers = info->has_oop_pointers();
  has_native_pointers = info->has_native_pointers();
}

void HeapShared::set_has_native_pointers(oop src_obj) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}

// Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
// should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
void HeapShared::start_scanning_for_oops() {
  {
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
    _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (UseCompressedOops || UseG1GC) {
      aot_log_info(aot)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                        UseCompressedOops ? p2i(CompressedOops::begin()) :
                                            p2i((address)G1CollectedHeap::heap()->reserved().start()),
                        UseCompressedOops ? p2i(CompressedOops::end()) :
                                            p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }

    archive_subgraphs();
  }

  init_seen_objects_table();
  Universe::archive_exception_instances();
}

void HeapShared::end_scanning_for_oops() {
  archive_strings();
  delete_seen_objects_table();
}

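// Writes out the archived heap. _pending_roots holds OopHandles, but
// ArchiveHeapWriter::write() takes plain oops, so the roots are first resolved
// into a temporary array.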
void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
  {
    NoSafepointVerifier nsv;
    if (!SkipArchiveHeapVerification) {
      CDSHeapVerifier::verify();
    }
    check_special_subgraph_classes();
  }

  StringTable::write_shared_table();
  GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
  for (int i = 0; i < _pending_roots->length(); i++) {
    roots->append(_pending_roots->at(i).resolve());
  }
  ArchiveHeapWriter::write(roots, heap_info);
  delete roots;

  ArchiveBuilder::OtherROAllocMark mark;
  write_subgraph_info_table();
}

void HeapShared::scan_java_mirror(oop orig_mirror) {
  oop m = scratch_java_mirror(orig_mirror);
  if (m != nullptr) { // nullptr if orig_mirror is for a class of a custom class loader
    copy_java_mirror_hashcode(orig_mirror, m);
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
    assert(success, "sanity");
  }
}

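// Scans orig_k's java mirror into the special subgraph; for instance klasses,
// also prepares the constant pool's resolved references for archiving.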
void HeapShared::scan_java_class(Klass* orig_k) {
  scan_java_mirror(orig_k->java_mirror());

  if (orig_k->is_instance_klass()) {
    InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
    orig_ik->constants()->prepare_resolved_references_for_archiving();
    // ... (rest of scan_java_class() and several following functions elided) ...
                        which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC,  true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }

  if (!CDSConfig::is_using_aot_linked_classes()) {
    assert(_run_time_special_subgraph != nullptr, "must be");
    Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
    if (klasses != nullptr) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        ExceptionMark em(current); // no exception can happen here
        resolve_or_init(k, /*do_init*/false, current);
      }
    }
  }

  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}

void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}

void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    // ... (rest of this function and several following functions elided) ...

HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(referrer, points_to_oops_checker.result());
}

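// The archived heap contains instances of the primitive box caches (see
// archive_subgraph_entry_fields above), so eagerly initialize the box classes
// whenever the archived heap is in use.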
void HeapShared::init_box_classes(TRAPS) {
  if (ArchiveHeapLoader::is_in_use()) {
    vmClasses::Boolean_klass()->initialize(CHECK);
    vmClasses::Character_klass()->initialize(CHECK);
    vmClasses::Float_klass()->initialize(CHECK);
    vmClasses::Double_klass()->initialize(CHECK);
    vmClasses::Byte_klass()->initialize(CHECK);
    vmClasses::Short_klass()->initialize(CHECK);
    vmClasses::Integer_klass()->initialize(CHECK);
    vmClasses::Long_klass()->initialize(CHECK);
    vmClasses::Void_klass()->initialize(CHECK);
  }
}

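// Prints the current archiving context (the labels pushed by ContextMark) and
// aborts the dump with an unrecoverable writing error.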
void HeapShared::exit_on_error() {
  if (_context != nullptr) {
    ResourceMark rm;
    LogStream ls(Log(cds, heap)::error());
    ls.print_cr("Context");
    for (int i = 0; i < _context->length(); i++) {
      const char* s = _context->at(i);
      ls.print_cr("- %s", s);
    }
  }
  debug_trace();
  MetaspaceShared::unrecoverable_writing_error();
}

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all objects that are reachable from orig_obj (including those that
//     were already archived when this function is called)
bool HeapShared::archive_reachable_objects_from(int level,
                                                KlassSubGraphInfo* subgraph_info,
                                                oop orig_obj) {
  assert(orig_obj != nullptr, "must be");
  PendingOopStack stack;
  stack.push(PendingOop(orig_obj, nullptr, level));

  while (stack.length() > 0) {
    PendingOop po = stack.pop();
    _object_being_archived = po;
    bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
    _object_being_archived = PendingOop();

    if (!status) {
      // Don't archive a subgraph root that's too big. For archived static fields, that's OK
      // ... (rest of this loop and the beginning of walk_one_object() elided) ...

  bool already_archived = has_been_archived(orig_obj);
  bool record_klasses_only = already_archived;
  if (!already_archived) {
    ++_num_new_archived_objs;
    if (!archive_object(orig_obj, referrer, subgraph_info)) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(aot, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %zu, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return false;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        exit_on_error();
      }
    }
  }

  Klass *orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  {
    // Find all the oops that are referenced by orig_obj, push them onto the stack
    // so we can work on them next.
    ResourceMark rm;
    OopFieldPusher pusher(stack, level, record_klasses_only, subgraph_info, orig_obj);
    orig_obj->oop_iterate(&pusher);
  }

  if (CDSConfig::is_initing_classes_at_dump_time()) {
    // The enum klasses are archived with aot-initialized mirror.
    // See AOTClassInitializer::can_archive_initialized_mirror().
  } else {
    if (CDSEnumKlass::is_enum_obj(orig_obj)) {
      // ... (rest of this function and several following functions elided) ...

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    ContextMark cm(klass_name);
    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      ContextMark cm(f->field_name);
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(aot, heap)("Archived subgraph records = %d",
                      _num_total_subgraph_recordings);
  log_info(aot, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(aot, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(aot, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(aot, heap)("  Verified %d references", _num_total_verifications);
#endif
}