 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassInitializer.hpp"
#include "cds/aotClassLocation.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsEnumKlass.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  bool valid() {
    return klass_name != nullptr;
  }
};

DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif


//
// If you add new entries to the following tables, you should know what you're doing!
//

static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  {"java/lang/Long$LongCache", "archivedCache"},
  {"java/lang/Byte$ByteCache", "archivedCache"},
  {"java/lang/Short$ShortCache", "archivedCache"},
  {"java/lang/Character$CharacterCache", "archivedCache"},
  {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale", "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
  {"java/util/ImmutableCollections", "archivedObjects"},
  {"java/lang/ModuleLayer", "EMPTY_LAYER"},
  {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger", "archivedCaches"},

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};

// full module graph
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData", "archivedData"},
  {nullptr, nullptr},
};
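
// Both tables above are sentinel-terminated: iteration stops at the first
// entry whose klass_name is nullptr (see ArchivableStaticFieldInfo::valid()).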

KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;

static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
      return true;
    }
  }
  return false;
}

bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
}

  Handle boot_loader(THREAD, result.get_oop());
  reset_states(boot_loader(), CHECK);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;

bool HeapShared::has_been_archived(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  return archived_object_cache()->get(obj) != nullptr;
}

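// Appends obj to the list of pending roots and returns its index. The index is
// stable for the rest of the dump; at runtime, the same index is passed to
// get_root()/clear_root() to locate the object in the archived root segments.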
int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (obj != nullptr) {
    assert(has_been_archived(obj), "must be");
  }
  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == nullptr) {
    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  }

  return _pending_roots->append(obj);
}

objArrayOop HeapShared::root_segment(int segment_idx) {
  if (CDSConfig::is_dumping_heap()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  } else {
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}

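// Maps a global root index to a (segment, intra-segment) index pair. For
// example, with _root_segment_max_size_elems == 1000, idx == 2500 maps to
// seg_idx == 2 and int_idx == 500 (2500 == 2 * 1000 + 500).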
void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
  assert(_root_segment_max_size_elems > 0, "sanity");

  // Try to avoid divisions for the common case.
  if (idx < _root_segment_max_size_elems) {
    seg_idx = 0;
    int_idx = idx;
  } else {
    seg_idx = idx / _root_segment_max_size_elems;
    int_idx = idx % _root_segment_max_size_elems;
  }

  assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
         "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}

// Returns the archived root at the given index. If 'clear' is true, the root
// slot is also cleared so the referenced object can eventually be collected.
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
  if (clear) {
    clear_root(index);
  }
  return result;
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(CDSConfig::is_using_archive(), "must be");
  if (ArchiveHeapLoader::is_in_use()) {
    int seg_idx, int_idx;
    get_segment_indexes(index, seg_idx, int_idx);
    if (log_is_enabled(Debug, cds, heap)) {
      oop old = root_segment(seg_idx)->obj_at(int_idx);
      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    root_segment(seg_idx)->obj_at_put(int_idx, nullptr);
  }
}

bool HeapShared::archive_object(oop obj, KlassSubGraphInfo* subgraph_info) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");
  if (has_been_archived(obj)) {
    return true;
  }

  if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: %zu",
                         p2i(obj), obj->size());
    debug_trace();
    return false;
  } else {
    count_allocation(obj->size());
    ArchiveHeapWriter::add_source_obj(obj);
    CachedOopInfo info = make_cached_oop_info(obj);
    archived_object_cache()->put_when_absent(obj, info);
    archived_object_cache()->maybe_grow();
    mark_native_pointers(obj);

    Klass* k = obj->klass();
    if (k->is_instance_klass()) {
      // Whenever we see a non-array Java object of type X, we mark X to be aot-initialized.
      // This ensures that during the production run, whenever Java code sees a cached object
      // of type X, we know that X is already initialized. (see TODO comment below ...)

      if (InstanceKlass::cast(k)->is_enum_subclass()
          // We can't rerun <clinit> of enum classes (see cdsEnumKlass.cpp) so
          // we must store them as AOT-initialized.
          || (subgraph_info == _dump_time_special_subgraph))
          // TODO: we do this only for the special subgraph for now. Extending this to
          // other subgraphs would require more refactoring of the core library (such as
          // moving some initialization logic into runtimeSetup()).
          //
          // For the other subgraphs, we have a weaker mechanism to ensure that
          // all classes in a subgraph are initialized before the subgraph is programmatically
      return nullptr;
    }
  }
  void set_oop(MetaspaceObj* ptr, oop o) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle handle(Universe::vm_global(), o);
    bool is_new = put(ptr, handle);
    assert(is_new, "cannot set twice");
  }
  void remove_oop(MetaspaceObj* ptr) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle* handle = get(ptr);
    if (handle != nullptr) {
      handle->release(Universe::vm_global());
      remove(ptr);
    }
  }
};

void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
    _scratch_references_table->set_oop(src, dest);
  }
}

objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_references_table->get_oop(src);
}

void HeapShared::init_dumping() {
  _scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
}

void HeapShared::init_scratch_objects(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
    }
  }
}

// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that java_mirror itself will be returned if it's already a
// scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {
  assert(java_lang_Class::is_instance(java_mirror), "must be");

  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
        return java_mirror;
      }
    }
  }

  if (java_lang_Class::is_primitive(java_mirror)) {
    return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
  } else {
    return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
  }
}

oop HeapShared::scratch_java_mirror(BasicType t) {
  assert((uint)t < T_VOID+1, "range check");
  assert(!is_reference_type(t), "sanity");
  return _scratch_basic_type_mirrors[t].resolve();
}

oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_java_mirror_table->get_oop(k);
}

void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  _scratch_java_mirror_table->set_oop(k, mirror);
}

void HeapShared::remove_scratch_objects(Klass* k) {
  // The Klass is being deallocated. Its Java mirror can still be alive, and it
  // should not point to a dead Klass. We need to break the link from the mirror
  // to the Klass. See how InstanceKlass::deallocate_contents does it for normal mirrors.
  oop mirror = _scratch_java_mirror_table->get_oop(k);
  if (mirror != nullptr) {
    java_lang_Class::set_klass(mirror, nullptr);
  }
  _scratch_java_mirror_table->remove_oop(k);
  if (k->is_instance_klass()) {
    _scratch_references_table->remove(InstanceKlass::cast(k)->constants());
  }
}

// TODO: we eventually want a more direct test for these kinds of things.
// For example the JVM could record some bit of context from the creation
// of the klass, such as who called the hidden class factory. Using
// string compares on names is fragile and will break as soon as somebody
// changes the names in the JDK code. See discussion in JDK-8342481 for
// related ideas about marking AOT-related classes.
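//
// Note (assumption for the reader): hidden class Symbols carry a '+<suffix>'
// appended to the original name (e.g. "java/lang/invoke/LambdaForm$MH+0x..."),
// which is why the prefixes below end with '+' and why is_lambda_proxy_klass()
// searches for the substring "$$Lambda+".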
bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
  return ik->is_hidden() &&
         (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
          ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
          ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
          ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
}

bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
  return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
}

  assert(info != nullptr, "must be");
  has_oop_pointers = info->has_oop_pointers();
  has_native_pointers = info->has_native_pointers();
}

void HeapShared::set_has_native_pointers(oop src_obj) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}

// Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
// should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
void HeapShared::start_scanning_for_oops() {
  {
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (UseCompressedOops || UseG1GC) {
      log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }

    archive_subgraphs();
  }

  init_seen_objects_table();
  Universe::archive_exception_instances();
}

void HeapShared::end_scanning_for_oops() {
  archive_strings();
  delete_seen_objects_table();
}

void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
  {
    NoSafepointVerifier nsv;
    CDSHeapVerifier::verify();
    check_special_subgraph_classes();
  }

  StringTable::write_shared_table(_dumped_interned_strings);
  ArchiveHeapWriter::write(_pending_roots, heap_info);

  ArchiveBuilder::OtherROAllocMark mark;
  write_subgraph_info_table();
}

void HeapShared::scan_java_mirror(oop orig_mirror) {
  oop m = scratch_java_mirror(orig_mirror);
  if (m != nullptr) { // nullptr if the class is loaded by a custom class loader
    copy_java_mirror_hashcode(orig_mirror, m);
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
    assert(success, "sanity");
  }
}

void HeapShared::scan_java_class(Klass* orig_k) {
  scan_java_mirror(orig_k->java_mirror());

  if (orig_k->is_instance_klass()) {
    InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
    orig_ik->constants()->prepare_resolved_references_for_archiving();
      return;
    }
  } else {
    assert(orig_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(orig_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(orig_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}

void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
  if (ik->module()->name() == vmSymbols::java_base()) {
    assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
    return;
  }

  const char* lambda_msg = "";
  if (CDSConfig::is_dumping_invokedynamic()) {
    lambda_msg = ", or a lambda proxy class";
    if (HeapShared::is_lambda_proxy_klass(ik) &&
        (ik->class_loader() == nullptr ||
         ik->class_loader() == SystemDictionary::java_platform_loader() ||
         ik->class_loader() == SystemDictionary::java_system_loader())) {
      return;
    }
  }

#ifndef PRODUCT
  if (!ik->module()->is_named() && ik->package() == nullptr && ArchiveHeapTestClass != nullptr) {
    // This class is loaded by ArchiveHeapTestClass
    return;
                         which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC, true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }
  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}

void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != nullptr && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}

void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
      resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {

  verify_the_heap(k, "after ");
}

void HeapShared::clear_archived_roots_of(Klass* k) {
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
  if (record != nullptr) {
    Array<int>* entry_field_records = record->entry_field_records();
    if (entry_field_records != nullptr) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 2 == 0, "sanity");
      for (int i = 0; i < efr_len; i += 2) {
        int root_index = entry_field_records->at(i+1);
        clear_root(root_index);
      }
    }
  }
}

class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _referencing_obj;

  // The following are for maintaining a stack for determining
  // CachedOopInfo::_referrer
  static WalkOopAndArchiveClosure* _current;
  WalkOopAndArchiveClosure* _last;
 public:
  WalkOopAndArchiveClosure(int level,
                           bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig) :
    _level(level),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _referencing_obj(orig) {
    _last = _current;
    _current = this;
  }
  ~WalkOopAndArchiveClosure() {
    _current = _last;
  }
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      size_t field_delta = pointer_delta(p, _referencing_obj, sizeof(char));

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[%zu] ==> " PTR_FORMAT " size %zu %s", _level,
                             _referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        if (log_is_enabled(Trace, cds, heap)) {
          LogTarget(Trace, cds, heap) log;
          LogStream out(log);
          obj->print_on(&out);
        }
      }

      bool success = HeapShared::archive_reachable_objects_from(
          _level + 1, _subgraph_info, obj);
      assert(success, "VM should have exited with unarchivable objects for _level > 1");
    }
  }

 public:
  static WalkOopAndArchiveClosure* current() { return _current; }
  oop referencing_obj()                      { return _referencing_obj; }
  KlassSubGraphInfo* subgraph_info()         { return _subgraph_info; }
};

WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = nullptr;

// Checks if an oop has any non-null oop fields
class PointsToOopsChecker : public BasicOopIterateClosure {
  bool _result;

  template <class T> void check(T *p) {
    _result |= (HeapAccess<>::oop_load(p) != nullptr);
  }

 public:
  PointsToOopsChecker() : _result(false) {}
  void do_oop(narrowOop *p) { check(p); }
  void do_oop(      oop *p) { check(p); }
  bool result() { return _result; }
};

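// The walker stack (WalkOopAndArchiveClosure::current()) identifies the object
// that is currently being scanned, i.e. the referrer of obj; it is null when
// obj is a subgraph root, so the recorded referrer is null in that case.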
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj) {
  WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
  oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(referrer, points_to_oops_checker.result());
}

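// Eagerly initialize the box classes whose instances can appear in the archived
// heap (e.g. via the autoboxing caches listed in archive_subgraph_entry_fields),
// so that any cached box object handed out at runtime belongs to an initialized class.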
void HeapShared::init_box_classes(TRAPS) {
  if (ArchiveHeapLoader::is_in_use()) {
    vmClasses::Boolean_klass()->initialize(CHECK);
    vmClasses::Character_klass()->initialize(CHECK);
    vmClasses::Float_klass()->initialize(CHECK);
    vmClasses::Double_klass()->initialize(CHECK);
    vmClasses::Byte_klass()->initialize(CHECK);
    vmClasses::Short_klass()->initialize(CHECK);
    vmClasses::Integer_klass()->initialize(CHECK);
    vmClasses::Long_klass()->initialize(CHECK);
    vmClasses::Void_klass()->initialize(CHECK);
  }
}

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all orig_obj and all reachable objects.
bool HeapShared::archive_reachable_objects_from(int level,
                                                KlassSubGraphInfo* subgraph_info,
                                                oop orig_obj) {
  assert(orig_obj != nullptr, "must be");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
    debug_trace();
    MetaspaceShared::unrecoverable_writing_error();
  }

  if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
    ResourceMark rm;
    LogTarget(Debug, cds, heap) log;
    LogStream out(log);
    out.print("Found java mirror " PTR_FORMAT " ", p2i(orig_obj));
    Klass* k = java_lang_Class::as_Klass(orig_obj);
    if (k != nullptr) {
      out.print("%s", k->external_name());
    } else {
      out.print("primitive");
    }
    out.print_cr("; scratch mirror = " PTR_FORMAT,
                 p2i(scratch_java_mirror(orig_obj)));
  }

  if (CDSConfig::is_initing_classes_at_dump_time()) {
    if (java_lang_Class::is_instance(orig_obj)) {
      orig_obj = scratch_java_mirror(orig_obj);
      // If you get an error here, you probably made a change in the JDK library that has added a Class
      // object that is referenced (directly or indirectly) by an ArchivableStaticFieldInfo
      // defined at the top of this file.
      log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
      debug_trace();
      MetaspaceShared::unrecoverable_writing_error();
    }
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return true;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool already_archived = has_been_archived(orig_obj);
  bool record_klasses_only = already_archived;
  if (!already_archived) {
    ++_num_new_archived_objs;
    if (!archive_object(orig_obj, subgraph_info)) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %zu, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return false;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        MetaspaceShared::unrecoverable_writing_error();
      }
    }
  }

  Klass* orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj);
  orig_obj->oop_iterate(&walker);

  if (CDSConfig::is_initing_classes_at_dump_time()) {
    // The enum klasses are archived with aot-initialized mirror.
    // See AOTClassInitializer::can_archive_initialized_mirror().
  } else {
    if (CDSEnumKlass::is_enum_obj(orig_obj)) {
      CDSEnumKlass::handle_enum_obj(level + 1, subgraph_info, orig_obj);
    }
  }

  return true;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. A mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
//    within a Class instance (java mirror). If the static field is a
//    reference field and points to a non-null java object, proceed to
//    the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
//    current object already exists, updates the pointer in the archived
//    copy of the referencing object to point to the current archived object.
//    Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
//    archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
//    point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
//    for loading and initializing before any object in the archived graph can
//    be accessed at runtime.
    for (int i = 0; i < num; i++) {
      Klass* subgraph_k = klasses->at(i);
      Symbol* name = subgraph_k->name();
      if (subgraph_k->is_instance_klass() &&
          name != vmSymbols::java_lang_Class() &&
          name != vmSymbols::java_lang_String() &&
          name != vmSymbols::java_lang_ArithmeticException() &&
          name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
          name != vmSymbols::java_lang_ArrayStoreException() &&
          name != vmSymbols::java_lang_ClassCastException() &&
          name != vmSymbols::java_lang_InternalError() &&
          name != vmSymbols::java_lang_NullPointerException()) {
        ResourceMark rm;
        fatal("special subgraph cannot have objects of type %s", subgraph_k->external_name());
      }
    }
  }
}

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != nullptr;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put_when_absent(obj, true);
  _seen_objects_table->maybe_grow();
  ++_num_new_walked_objs;
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records = %d",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}
// [2] included in the SharedArchiveConfigFile.
void HeapShared::add_to_dumped_interned_strings(oop string) {
  assert_at_safepoint(); // DumpedInternedStrings uses raw oops
  assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
  bool created;
  _dumped_interned_strings->put_if_absent(string, true, &created);
  if (created) {
    // Prevent string deduplication from changing the value field to
    // something not in the archive.
    java_lang_String::set_deduplication_forbidden(string);
    _dumped_interned_strings->maybe_grow();
  }
}

bool HeapShared::is_dumped_interned_string(oop o) {
  return _dumped_interned_strings->get(o) != nullptr;
}

void HeapShared::debug_trace() {
  ResourceMark rm;
  WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
  if (walker != nullptr) {
    LogStream ls(Log(cds, heap)::error());
    CDSHeapVerifier::trace_to_root(&ls, walker->referencing_obj());
  }
}

#ifndef PRODUCT
// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  void* _start;
  BitMap *_oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(void* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual void do_oop(narrowOop* p) {
    assert(UseCompressedOops, "sanity");
    _num_total_oops ++;
    narrowOop v = *p;
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassInitializer.hpp"
#include "cds/aotClassLocation.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsAccess.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsEnumKlass.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  bool valid() {
    return klass_name != nullptr;
  }
};

class HeapShared::ContextMark : public StackObj {
  ResourceMark rm;
 public:
  ContextMark(const char* c) : rm{} {
    _context->push(c);
  }
  ~ContextMark() {
    _context->pop();
  }
};

DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif


//
// If you add new entries to the following tables, you should know what you're doing!
//

static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  {"java/lang/Long$LongCache", "archivedCache"},
  {"java/lang/Byte$ByteCache", "archivedCache"},
  {"java/lang/Short$ShortCache", "archivedCache"},
  {"java/lang/Character$CharacterCache", "archivedCache"},
  {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale", "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
  {"java/util/ImmutableCollections", "archivedObjects"},
  {"java/lang/ModuleLayer", "EMPTY_LAYER"},
  {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger", "archivedCaches"},
  {"java/lang/reflect/Proxy$ProxyBuilder", "archivedData"}, // FIXME -- requires AOTClassLinking

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};

// full module graph
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData", "archivedData"},
  {nullptr, nullptr},
};
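
// Both tables above are sentinel-terminated: iteration stops at the first
// entry whose klass_name is nullptr (see ArchivableStaticFieldInfo::valid()).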

KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;

static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
      return true;
    }
  }
  return false;
}

bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
}

  Handle boot_loader(THREAD, result.get_oop());
  reset_states(boot_loader(), CHECK);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;

bool HeapShared::has_been_archived(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  return archived_object_cache()->get(obj) != nullptr;
}

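// Appends obj to the list of pending roots and returns its index. The index is
// stable for the rest of the dump; at runtime, the same index is passed to
// get_root()/clear_root() to locate the object in the archived root segments.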
int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (obj != nullptr) {
    assert(has_been_archived(obj), "must be");
  }
  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == nullptr) {
    _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
  }

  OopHandle oh(Universe::vm_global(), obj);
  return _pending_roots->append(oh);
}

objArrayOop HeapShared::root_segment(int segment_idx) {
  if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  } else {
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}

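// Helpers that let OopHandle be used as a hashtable key: we hash by
// identity_hash(), which stays stable even if the GC moves the object, and
// compare by resolving the handles to the current oops.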
inline unsigned int oop_handle_hash(const OopHandle& oh) {
  oop o = oh.resolve();
  if (o == nullptr) {
    return 0;
  } else {
    return o->identity_hash();
  }
}

inline bool oop_handle_equals(const OopHandle& a, const OopHandle& b) {
  return a.resolve() == b.resolve();
}

class OrigToScratchObjectTable: public ResourceHashtable<OopHandle, OopHandle,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;

void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
  if (_orig_to_scratch_object_table == nullptr) {
    _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
  }

  OopHandle orig_h(Universe::vm_global(), orig_obj);
  OopHandle scratch_h(Universe::vm_global(), scratch_obj);
  _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
}

oop HeapShared::orig_to_scratch_object(oop orig_obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
  if (_orig_to_scratch_object_table != nullptr) {
    OopHandle orig(&orig_obj);
    OopHandle* v = _orig_to_scratch_object_table->get(orig);
    if (v != nullptr) {
      return v->resolve();
    }
  }
  return nullptr;
}

// Permanent oops are used to support AOT-compiled methods, which may have in-line references
// to Strings and MH oops.
//
// At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
// and are accessed via CDSAccess::get_archived_object(int).
struct PermanentOopInfo {
  int _index;       // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
  int _heap_offset; // Offset of the object from the bottom of the archived heap.
  PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
};

class PermanentOopTable: public ResourceHashtable<OopHandle, PermanentOopInfo,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

static int _dumptime_permanent_oop_count = 0;
static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;

// ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
// so we can remember its offset (from the bottom of the archived heap).
void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
  assert_at_safepoint();
  if (_dumptime_permanent_oop_table == nullptr) {
    _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
  }

  PermanentOopInfo info(-1, offset);
  OopHandle oh(Universe::vm_global(), obj);
  _dumptime_permanent_oop_table->put_when_absent(oh, info);
}

// A permanent index is assigned to an archived object ONLY when
// the AOT compiler calls this function.
int HeapShared::get_archived_object_permanent_index(oop obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);

  if (!CDSConfig::is_dumping_heap()) {
    return -1; // Called by the Leyden old workflow
  }
  if (_dumptime_permanent_oop_table == nullptr) {
    return -1;
  }

  if (_orig_to_scratch_object_table != nullptr) {
    OopHandle orig(&obj);
    OopHandle* v = _orig_to_scratch_object_table->get(orig);
    if (v != nullptr) {
      obj = v->resolve();
    }
  }

  OopHandle tmp(&obj);
  PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
  if (info == nullptr) {
    return -1;
  } else {
    if (info->_index < 0) {
      info->_index = _dumptime_permanent_oop_count++;
    }
    return info->_index;
  }
}

oop HeapShared::get_archived_object(int permanent_index) {
  assert(permanent_index >= 0, "sanity");
  assert(ArchiveHeapLoader::is_in_use(), "sanity");
  assert(_runtime_permanent_oops != nullptr, "sanity");

  return _runtime_permanent_oops->at(permanent_index).resolve();
}

// Remember all archived heap objects that have a permanent index.
// table[i] = offset of oop whose permanent index is i.
void CachedCodeDirectoryInternal::dumptime_init_internal() {
  const int count = _dumptime_permanent_oop_count;
  if (count == 0) {
    // Avoid confusing CDS code with zero-sized tables, just return.
    log_info(cds)("No permanent oops");
    _permanent_oop_count = count;
    _permanent_oop_offsets = nullptr;
    return;
  }

  int* table = (int*)CDSAccess::allocate_from_code_cache(count * sizeof(int));
  for (int i = 0; i < count; i++) {
    table[i] = -1;
  }
  _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
    int index = info._index;
    if (index >= 0) {
      assert(index < count, "sanity");
      table[index] = info._heap_offset;
    }
    return true; // continue
  });

  for (int i = 0; i < count; i++) {
    assert(table[i] >= 0, "must be");
  }

  log_info(cds)("Dumped %d permanent oops", count);

  _permanent_oop_count = count;
  CDSAccess::set_pointer(&_permanent_oop_offsets, table);
}

// This is called during the bootstrap of the production run, before any GC can happen.
// Record each permanent oop in an OopHandle for GC safety.
void CachedCodeDirectoryInternal::runtime_init_internal() {
  int count = _permanent_oop_count;
  int* table = _permanent_oop_offsets;
  _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
  for (int i = 0; i < count; i++) {
    oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
    OopHandle oh(Universe::vm_global(), obj);
    _runtime_permanent_oops->append(oh);
  }
}

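// Maps a global root index to a (segment, intra-segment) index pair. For
// example, with _root_segment_max_size_elems == 1000, idx == 2500 maps to
// seg_idx == 2 and int_idx == 500 (2500 == 2 * 1000 + 500).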
void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
  assert(_root_segment_max_size_elems > 0, "sanity");

  // Try to avoid divisions for the common case.
  if (idx < _root_segment_max_size_elems) {
    seg_idx = 0;
    int_idx = idx;
  } else {
    seg_idx = idx / _root_segment_max_size_elems;
    int_idx = idx % _root_segment_max_size_elems;
  }

  assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
         "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}

// Returns the archived root at the given index. If 'clear' is true, the root
// slot is also cleared so the referenced object can eventually be collected.
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
  if (clear) {
    clear_root(index);
  }
  return result;
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(CDSConfig::is_using_archive(), "must be");
  if (ArchiveHeapLoader::is_in_use()) {
    int seg_idx, int_idx;
    get_segment_indexes(index, seg_idx, int_idx);
    if (log_is_enabled(Debug, cds, heap)) {
      oop old = root_segment(seg_idx)->obj_at(int_idx);
      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    root_segment(seg_idx)->obj_at_put(int_idx, nullptr);
  }
}

bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");
  if (has_been_archived(obj)) {
    return true;
  }

  if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: %zu",
                         p2i(obj), obj->size());
    debug_trace();
    return false;
  } else {
    count_allocation(obj->size());
    ArchiveHeapWriter::add_source_obj(obj);
    CachedOopInfo info = make_cached_oop_info(obj, referrer);
    archived_object_cache()->put_when_absent(obj, info);
    archived_object_cache()->maybe_grow();
    mark_native_pointers(obj);

    Klass* k = obj->klass();
    if (k->is_instance_klass()) {
      // Whenever we see a non-array Java object of type X, we mark X to be aot-initialized.
      // This ensures that during the production run, whenever Java code sees a cached object
      // of type X, we know that X is already initialized. (see TODO comment below ...)

      if (InstanceKlass::cast(k)->is_enum_subclass()
          // We can't rerun <clinit> of enum classes (see cdsEnumKlass.cpp) so
          // we must store them as AOT-initialized.
          || (subgraph_info == _dump_time_special_subgraph))
          // TODO: we do this only for the special subgraph for now. Extending this to
          // other subgraphs would require more refactoring of the core library (such as
          // moving some initialization logic into runtimeSetup()).
          //
          // For the other subgraphs, we have a weaker mechanism to ensure that
          // all classes in a subgraph are initialized before the subgraph is programmatically
      return nullptr;
    }
  }
  void set_oop(MetaspaceObj* ptr, oop o) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle handle(Universe::vm_global(), o);
    bool is_new = put(ptr, handle);
    assert(is_new, "cannot set twice");
  }
  void remove_oop(MetaspaceObj* ptr) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle* handle = get(ptr);
    if (handle != nullptr) {
      handle->release(Universe::vm_global());
      remove(ptr);
    }
  }
};

void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  if (_scratch_references_table == nullptr) {
    _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
  }
  if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
    _scratch_references_table->set_oop(src, dest);
  }
}

objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_references_table->get_oop(src);
}

void HeapShared::init_dumping() {
  _scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
}

void HeapShared::init_scratch_objects(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
      track_scratch_object(Universe::java_mirror(bt), m);
    }
  }
}

// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that java_mirror itself will be returned if it's already a
// scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {
  assert(java_lang_Class::is_instance(java_mirror), "must be");

  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
        return java_mirror;
      }
    }
  }

  if (java_lang_Class::is_primitive(java_mirror)) {
    return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
  } else {
    return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
  }
}

oop HeapShared::scratch_java_mirror(BasicType t) {
  assert((uint)t < T_VOID+1, "range check");
  assert(!is_reference_type(t), "sanity");
  return _scratch_basic_type_mirrors[t].resolve();
}

oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_java_mirror_table->get_oop(k);
}

void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  track_scratch_object(k->java_mirror(), mirror);
  _scratch_java_mirror_table->set_oop(k, mirror);
}


void HeapShared::remove_scratch_objects(Klass* k) {
  // The Klass is being deallocated. Its Java mirror can still be alive, and it
  // should not point to a dead Klass. We need to break the link from the mirror
  // to the Klass. See how InstanceKlass::deallocate_contents does it for normal mirrors.
  oop mirror = _scratch_java_mirror_table->get_oop(k);
  if (mirror != nullptr) {
    java_lang_Class::set_klass(mirror, nullptr);
  }
  _scratch_java_mirror_table->remove_oop(k);
  if (k->is_instance_klass()) {
    _scratch_references_table->remove(InstanceKlass::cast(k)->constants());
  }
  if (mirror != nullptr) {
    OopHandle tmp(&mirror);
    OopHandle* v = _orig_to_scratch_object_table->get(tmp);
    if (v != nullptr) {
      oop scratch_mirror = v->resolve();
      java_lang_Class::set_klass(scratch_mirror, nullptr);
      _orig_to_scratch_object_table->remove(tmp);
    }
  }
}

// TODO: we eventually want a more direct test for these kinds of things.
// For example the JVM could record some bit of context from the creation
// of the klass, such as who called the hidden class factory. Using
// string compares on names is fragile and will break as soon as somebody
// changes the names in the JDK code. See discussion in JDK-8342481 for
// related ideas about marking AOT-related classes.
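//
// Note (assumption for the reader): hidden class Symbols carry a '+<suffix>'
// appended to the original name (e.g. "java/lang/invoke/LambdaForm$MH+0x..."),
// which is why the prefixes below end with '+' and why is_lambda_proxy_klass()
// searches for the substring "$$Lambda+".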
bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
  return ik->is_hidden() &&
         (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
          ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
          ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
          ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
}

bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
  return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
}

  assert(info != nullptr, "must be");
  has_oop_pointers = info->has_oop_pointers();
  has_native_pointers = info->has_native_pointers();
}

void HeapShared::set_has_native_pointers(oop src_obj) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}

// Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
// should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
void HeapShared::start_scanning_for_oops() {
  {
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
    _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (UseCompressedOops || UseG1GC) {
      log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }

    archive_subgraphs();
  }

  init_seen_objects_table();
  Universe::archive_exception_instances();
}
869
870 void HeapShared::end_scanning_for_oops() {
871 archive_strings();
872 delete_seen_objects_table();
873 }
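
// A sketch of the calling protocol assumed by the two functions above (the
// actual driver is AOTArtifactFinder; the exact call sites live elsewhere):
//
//   HeapShared::start_scanning_for_oops();  // create caches, archive subgraphs
//   // ... scan_java_class() / scan_java_mirror() for each discovered class ...
//   HeapShared::end_scanning_for_oops();    // archive strings, drop seen-table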
874
875 void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
876 {
877 NoSafepointVerifier nsv;
878 if (!SkipArchiveHeapVerification) {
879 CDSHeapVerifier::verify();
880 }
881 check_special_subgraph_classes();
882 }
883
884 StringTable::write_shared_table(_dumped_interned_strings);
885 GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
886 for (int i = 0; i < _pending_roots->length(); i++) {
887 roots->append(_pending_roots->at(i).resolve());
888 }
889 ArchiveHeapWriter::write(roots, heap_info);
890 delete roots;
891
892 ArchiveBuilder::OtherROAllocMark mark;
893 write_subgraph_info_table();
894 }
895
896 void HeapShared::scan_java_mirror(oop orig_mirror) {
897 oop m = scratch_java_mirror(orig_mirror);
898 if (m != nullptr) { // nullptr if the class was loaded by a custom class loader
899 copy_java_mirror_hashcode(orig_mirror, m);
900 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
901 assert(success, "sanity");
902 }
903 }
904
905 void HeapShared::scan_java_class(Klass* orig_k) {
906 scan_java_mirror(orig_k->java_mirror());
907
908 if (orig_k->is_instance_klass()) {
909 InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
910 orig_ik->constants()->prepare_resolved_references_for_archiving();
1017 return;
1018 }
1019 } else {
1020 assert(orig_k->is_typeArray_klass(), "must be");
1021 // Primitive type arrays are created early during Universe::genesis.
1022 return;
1023 }
1024
1025 if (log_is_enabled(Debug, cds, heap)) {
1026 if (!_subgraph_object_klasses->contains(orig_k)) {
1027 ResourceMark rm;
1028 log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
1029 }
1030 }
1031
1032 _subgraph_object_klasses->append_if_missing(orig_k);
1033 _has_non_early_klasses |= is_non_early_klass(orig_k);
1034 }
1035
1036 void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
1037 if (CDSConfig::is_dumping_invokedynamic()) {
1038 // FIXME -- this allows LambdaProxy classes
1039 return;
1040 }
1041 if (ik->module()->name() == vmSymbols::java_base()) {
1042 assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
1043 return;
1044 }
1045
1046 const char* lambda_msg = "";
1047 if (CDSConfig::is_dumping_invokedynamic()) {
1048 lambda_msg = ", or a lambda proxy class";
1049 if (HeapShared::is_lambda_proxy_klass(ik) &&
1050 (ik->class_loader() == nullptr ||
1051 ik->class_loader() == SystemDictionary::java_platform_loader() ||
1052 ik->class_loader() == SystemDictionary::java_system_loader())) {
1053 return;
1054 }
1055 }
1056
1057 #ifndef PRODUCT
1058 if (!ik->module()->is_named() && ik->package() == nullptr && ArchiveHeapTestClass != nullptr) {
1059 // This class is loaded by ArchiveHeapTestClass
1060 return;
1272 which, k->external_name());
1273 FlagSetting fs1(VerifyBeforeGC, true);
1274 FlagSetting fs2(VerifyDuringGC, true);
1275 FlagSetting fs3(VerifyAfterGC, true);
1276 Universe::heap()->collect(GCCause::_java_lang_system_gc);
1277 }
1278 }
1279 }
1280
1281 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
1282 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
1283 //
1284 // Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
1285 // ClassFileLoadHook is enabled, it's possible for such a class to be dynamically replaced. In
1286 // that case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
1287 void HeapShared::resolve_classes(JavaThread* current) {
1288 assert(CDSConfig::is_using_archive(), "runtime only!");
1289 if (!ArchiveHeapLoader::is_in_use()) {
1290 return; // nothing to do
1291 }
1292
1293 if (!CDSConfig::is_using_aot_linked_classes()) {
1294 assert(_run_time_special_subgraph != nullptr, "must be");
1295 Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
1296 if (klasses != nullptr) {
1297 for (int i = 0; i < klasses->length(); i++) {
1298 Klass* k = klasses->at(i);
1299 ExceptionMark em(current); // no exception can happen here
1300 resolve_or_init(k, /*do_init*/false, current);
1301 }
1302 }
1303 }
1304
1305 resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
1306 resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
1307 }
1308
1309 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1310 for (int i = 0; fields[i].valid(); i++) {
1311 ArchivableStaticFieldInfo* info = &fields[i];
1312 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1313 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1314 assert(k != nullptr && k->is_shared_boot_class(), "sanity");
1315 resolve_classes_for_subgraph_of(current, k);
1316 }
1317 }
1318
1319 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1320 JavaThread* THREAD = current;
1321 ExceptionMark em(THREAD);
1322 const ArchivedKlassSubGraphInfoRecord* record =
1323 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1324 if (HAS_PENDING_EXCEPTION) {
1557
1558 verify_the_heap(k, "after ");
1559 }
1560
1561 void HeapShared::clear_archived_roots_of(Klass* k) {
1562 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
1563 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
1564 if (record != nullptr) {
1565 Array<int>* entry_field_records = record->entry_field_records();
1566 if (entry_field_records != nullptr) {
1567 int efr_len = entry_field_records->length();
1568 assert(efr_len % 2 == 0, "sanity");
1569 for (int i = 0; i < efr_len; i += 2) {
1570 int root_index = entry_field_records->at(i+1);
1571 clear_root(root_index);
1572 }
1573 }
1574 }
1575 }
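
// Schematic layout of entry_field_records, inferred from the even-length
// assert and the at(i+1) access above -- a flat array of pairs:
//
//   [ field_offset_0, root_index_0, field_offset_1, root_index_1, ... ]
//
// clear_archived_roots_of() only needs the root indices, so it skips the
// offsets.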
1576
1577 // Push all oops that are referenced by _referencing_obj onto the _stack.
1578 class HeapShared::ReferentPusher: public BasicOopIterateClosure {
1579 PendingOopStack* _stack;
1580 GrowableArray<oop> _found_oop_fields;
1581 int _level;
1582 bool _record_klasses_only;
1583 KlassSubGraphInfo* _subgraph_info;
1584 oop _referencing_obj;
1585 public:
1586 ReferentPusher(PendingOopStack* stack,
1587 int level,
1588 bool record_klasses_only,
1589 KlassSubGraphInfo* subgraph_info,
1590 oop orig) :
1591 _stack(stack),
1592 _found_oop_fields(),
1593 _level(level),
1594 _record_klasses_only(record_klasses_only),
1595 _subgraph_info(subgraph_info),
1596 _referencing_obj(orig) {
1597 }
1598 void do_oop(narrowOop *p) { ReferentPusher::do_oop_work(p); }
1599 void do_oop( oop *p) { ReferentPusher::do_oop_work(p); }
1600
1601 ~ReferentPusher() {
1602 while (_found_oop_fields.length() > 0) {
1603 // This produces the exact same traversal order as the previous version
1604 // of ReferentPusher that recurses on the C stack -- a depth-first search,
1605 // walking the oop fields in _referencing_obj by ascending field offsets.
1606 oop obj = _found_oop_fields.pop();
1607 _stack->push(PendingOop(obj, _referencing_obj, _level + 1));
1608 }
1609 }
1610
1611 protected:
1612 template <class T> void do_oop_work(T *p) {
1613 oop obj = RawAccess<>::oop_load(p);
1614 if (!CompressedOops::is_null(obj)) {
1615 size_t field_delta = pointer_delta(p, _referencing_obj, sizeof(char));
1616
1617 if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
1618 ResourceMark rm;
1619 log_debug(cds, heap)("(%d) %s[%zu] ==> " PTR_FORMAT " size %zu %s", _level,
1620 _referencing_obj->klass()->external_name(), field_delta,
1621 p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
1622 if (log_is_enabled(Trace, cds, heap)) {
1623 LogTarget(Trace, cds, heap) log;
1624 LogStream out(log);
1625 obj->print_on(&out);
1626 }
1627 }
1628
1629 _found_oop_fields.push(obj);
1630 }
1631 }
1632
1633 public:
1634 oop referencing_obj() { return _referencing_obj; }
1635 KlassSubGraphInfo* subgraph_info() { return _subgraph_info; }
1636 };
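
// Worked example of the traversal order (hypothetical object X with oop
// fields a and b, a at the lower offset):
//
//   do_oop_work() pushes a, then b, onto _found_oop_fields;
//   ~ReferentPusher pops b, then a, onto the pending stack;
//   the caller's stack.pop() therefore yields a before b,
//
// i.e. fields are expanded in ascending offset order, matching the old
// recursive depth-first search.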
1637
1638
1639 // Checks if an oop has any non-null oop fields
1640 class PointsToOopsChecker : public BasicOopIterateClosure {
1641 bool _result;
1642
1643 template <class T> void check(T *p) {
1644 _result |= (HeapAccess<>::oop_load(p) != nullptr);
1645 }
1646
1647 public:
1648 PointsToOopsChecker() : _result(false) {}
1649 void do_oop(narrowOop *p) { check(p); }
1650 void do_oop( oop *p) { check(p); }
1651 bool result() { return _result; }
1652 };
1653
1654 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
1655 PointsToOopsChecker points_to_oops_checker;
1656 obj->oop_iterate(&points_to_oops_checker);
1657 return CachedOopInfo(referrer, points_to_oops_checker.result());
1658 }
1659
1660 void HeapShared::init_box_classes(TRAPS) {
1661 if (ArchiveHeapLoader::is_in_use()) {
1662 vmClasses::Boolean_klass()->initialize(CHECK);
1663 vmClasses::Character_klass()->initialize(CHECK);
1664 vmClasses::Float_klass()->initialize(CHECK);
1665 vmClasses::Double_klass()->initialize(CHECK);
1666 vmClasses::Byte_klass()->initialize(CHECK);
1667 vmClasses::Short_klass()->initialize(CHECK);
1668 vmClasses::Integer_klass()->initialize(CHECK);
1669 vmClasses::Long_klass()->initialize(CHECK);
1670 vmClasses::Void_klass()->initialize(CHECK);
1671 }
1672 }
1673
1674 void HeapShared::exit_on_error() {
1675 if (_context != nullptr) {
1676 ResourceMark rm;
1677 LogStream ls(Log(cds, heap)::error());
1678 ls.print_cr("Context");
1679 for (int i = 0; i < _context->length(); i++) {
1680 const char* s = _context->at(i);
1681 ls.print_cr("- %s", s);
1682 }
1683 }
1684 debug_trace();
1685 MetaspaceShared::unrecoverable_writing_error();
1686 }
1687
1688 // (1) If orig_obj has not been archived yet, archive it.
1689 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
1690 // trace all objects that are reachable from it, and make sure these objects are archived.
1691 // (3) Record the klasses of all objects that are reachable from orig_obj (including those that
1692 // were already archived when this function is called).
1693 bool HeapShared::archive_reachable_objects_from(int level,
1694 KlassSubGraphInfo* subgraph_info,
1695 oop orig_obj) {
1696 PendingOopStack stack;
1697 stack.push(PendingOop(orig_obj, nullptr, level));
1698
1699 while (stack.length() > 0) {
1700 PendingOop po = stack.pop();
1701 _object_being_archived = po;
1702 bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
1703 _object_being_archived = PendingOop();
1704
1705 if (!status) {
1706 // Don't archive a subgraph root that's too big. For archived static fields, that's OK
1707 // as the Java code will take care of initializing this field dynamically.
1708 assert(level == 1, "VM should have exited when unarchivable objects are found at level > 1");
1709 return false;
1710 }
1711 }
1712
1713 return true;
1714 }
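
// The worklist pattern above, sketched (illustrative only):
//
//   stack: [root]
//   pop root   -> walk_one_object() pushes root's referents
//   pop child  -> pushes grandchildren, and so on (depth-first)
//
// Keeping the pending objects in PendingOopStack instead of recursing
// presumably avoids deep native-stack usage on large object graphs.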
1715
1716 bool HeapShared::walk_one_object(PendingOopStack* stack, int level, KlassSubGraphInfo* subgraph_info,
1717 oop orig_obj, oop referrer) {
1718 assert(orig_obj != nullptr, "must be");
1719 if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
1720 // This object has injected fields that cannot be supported easily, so we disallow them for now.
1721 // If you get an error here, you probably made a change in the JDK library that has added
1722 // these objects that are referenced (directly or indirectly) by static fields.
1723 ResourceMark rm;
1724 log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
1725 exit_on_error();
1726 }
1727
1728 if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
1729 ResourceMark rm;
1730 LogTarget(Debug, cds, heap) log;
1731 LogStream out(log);
1732 out.print("Found java mirror " PTR_FORMAT " ", p2i(orig_obj));
1733 Klass* k = java_lang_Class::as_Klass(orig_obj);
1734 if (k != nullptr) {
1735 out.print("%s", k->external_name());
1736 } else {
1737 out.print("primitive");
1738 }
1739 out.print_cr("; scratch mirror = " PTR_FORMAT,
1740 p2i(scratch_java_mirror(orig_obj)));
1741 }
1742
1743 if (CDSConfig::is_initing_classes_at_dump_time()) {
1744 if (java_lang_Class::is_instance(orig_obj)) {
1745 orig_obj = scratch_java_mirror(orig_obj);
1766 // If you get an error here, you probably made a change in the JDK library that has added a Class
1767 // object that is referenced (directly or indirectly) by an ArchivableStaticFieldInfo
1768 // defined at the top of this file.
1769 log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
1770 debug_trace();
1771 MetaspaceShared::unrecoverable_writing_error();
1772 }
1773 }
1774
1775 if (has_been_seen_during_subgraph_recording(orig_obj)) {
1776 // orig_obj has already been archived and traced. Nothing more to do.
1777 return true;
1778 } else {
1779 set_has_been_seen_during_subgraph_recording(orig_obj);
1780 }
1781
1782 bool already_archived = has_been_archived(orig_obj);
1783 bool record_klasses_only = already_archived;
1784 if (!already_archived) {
1785 ++_num_new_archived_objs;
1786 if (!archive_object(orig_obj, referrer, subgraph_info)) {
1787 // Skip archiving the sub-graph referenced from the current entry field.
1788 ResourceMark rm;
1789 log_error(cds, heap)(
1790 "Cannot archive the sub-graph referenced from %s object ("
1791 PTR_FORMAT ") size %zu, skipped.",
1792 orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
1793 if (level == 1) {
1794 // Don't archive a subgraph root that's too big. For archived static fields, that's OK
1795 // as the Java code will take care of initializing this field dynamically.
1796 return false;
1797 } else {
1798 // We don't know how to handle an object that has been archived, but some of its reachable
1799 // objects cannot be archived. Bail out for now. We might need to fix this in the future if
1800 // we have a real use case.
1801 exit_on_error();
1802 }
1803 }
1804 }
1805
1806 Klass *orig_k = orig_obj->klass();
1807 subgraph_info->add_subgraph_object_klass(orig_k);
1808
1809 {
1810 // Find all the oops that are referenced by orig_obj, push them onto the stack
1811 // so we can work on them next.
1812 ResourceMark rm;
1813 ReferentPusher pusher(stack, level, record_klasses_only, subgraph_info, orig_obj);
1814 orig_obj->oop_iterate(&pusher);
1815 }
1816
1817 if (CDSConfig::is_initing_classes_at_dump_time()) {
1818 // The enum klasses are archived with aot-initialized mirror.
1819 // See AOTClassInitializer::can_archive_initialized_mirror().
1820 } else {
1821 if (CDSEnumKlass::is_enum_obj(orig_obj)) {
1822 CDSEnumKlass::handle_enum_obj(level + 1, subgraph_info, orig_obj);
1823 }
1824 }
1825
1826 return true;
1827 }
1828
1829 //
1830 // Start from the given static field in a java mirror and archive the
1831 // complete sub-graph of java heap objects that are reached directly
1832 // or indirectly from the starting object by following references.
1833 // Sub-graph archiving restrictions (current):
1834 //
1835 // - All classes of objects in the archived sub-graph (including the
1836 // entry class) must be boot class only.
1837 // - No java.lang.Class instance (java mirror) can be included inside
838 // an archived sub-graph. A mirror can only be the sub-graph entry object.
1839 //
1840 // The Java heap object sub-graph archiving process (see ReferentPusher):
1841 //
1842 // 1) Java object sub-graph archiving starts from a given static field
1843 // within a Class instance (java mirror). If the static field is a
1844 // reference field and points to a non-null java object, proceed to
1845 // the next step.
1846 //
1847 // 2) Archives the referenced java object. If an archived copy of the
1848 // current object already exists, updates the pointer in the archived
1849 // copy of the referencing object to point to the current archived object.
1850 // Otherwise, proceed to the next step.
1851 //
852 // 3) Follows all references within the current java object and recursively
853 // archives the sub-graph of objects starting from each reference.
1854 //
855 // 4) Updates the pointer in the archived copy of the referencing object to
1856 // point to the current archived object.
1857 //
1858 // 5) The Klass of the current java object is added to the list of Klasses
1859 // for loading and initializing before any object in the archived graph can
1860 // be accessed at runtime.
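//
// Worked example (hypothetical classes, for illustration only):
//
//   class X { static Y root; }   // X.root is an archived entry field
//   Y has a field referencing Z
//
// Archiving X.root copies Y and Z into the archived heap, records the
// root so it can be restored into X.root at runtime, and records the
// klasses {Y, Z} for loading and initialization before first access.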
1968 for (int i = 0; i < num; i++) {
1969 Klass* subgraph_k = klasses->at(i);
1970 Symbol* name = subgraph_k->name();
1971 if (subgraph_k->is_instance_klass() &&
1972 name != vmSymbols::java_lang_Class() &&
1973 name != vmSymbols::java_lang_String() &&
1974 name != vmSymbols::java_lang_ArithmeticException() &&
1975 name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
1976 name != vmSymbols::java_lang_ArrayStoreException() &&
1977 name != vmSymbols::java_lang_ClassCastException() &&
1978 name != vmSymbols::java_lang_InternalError() &&
1979 name != vmSymbols::java_lang_NullPointerException()) {
1980 ResourceMark rm;
1981 fatal("special subgraph cannot have objects of type %s", subgraph_k->external_name());
1982 }
1983 }
1984 }
1985 }
1986
1987 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
1988 HeapShared::PendingOop HeapShared::_object_being_archived;
1989 int HeapShared::_num_new_walked_objs;
1990 int HeapShared::_num_new_archived_objs;
1991 int HeapShared::_num_old_recorded_klasses;
1992
1993 int HeapShared::_num_total_subgraph_recordings = 0;
1994 int HeapShared::_num_total_walked_objs = 0;
1995 int HeapShared::_num_total_archived_objs = 0;
1996 int HeapShared::_num_total_recorded_klasses = 0;
1997 int HeapShared::_num_total_verifications = 0;
1998
1999 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
2000 return _seen_objects_table->get(obj) != nullptr;
2001 }
2002
2003 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
2004 assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
2005 _seen_objects_table->put_when_absent(obj, true);
2006 _seen_objects_table->maybe_grow();
2007 ++ _num_new_walked_objs;
2008 }
2235
2236 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2237 bool is_full_module_graph) {
2238 _num_total_subgraph_recordings = 0;
2239 _num_total_walked_objs = 0;
2240 _num_total_archived_objs = 0;
2241 _num_total_recorded_klasses = 0;
2242 _num_total_verifications = 0;
2243
2244 // For each class X that has one or more archived fields:
2245 // [1] Dump the subgraph of each archived field
2246 // [2] Create a list of all the classes of the objects that can be reached
2247 // by any of these static fields.
2248 // At runtime, these classes are initialized before X's archived fields
2249 // are restored by HeapShared::initialize_from_archived_subgraph().
2250 for (int i = 0; fields[i].valid(); ) {
2251 ArchivableStaticFieldInfo* info = &fields[i];
2252 const char* klass_name = info->klass_name;
2253 start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2254
2255 ContextMark cm(klass_name);
2256 // If you have specified consecutive fields of the same klass in
2257 // fields[], these will be archived in the same
2258 // {start_recording_subgraph ... done_recording_subgraph} pass to
2259 // save time.
2260 for (; fields[i].valid(); i++) {
2261 ArchivableStaticFieldInfo* f = &fields[i];
2262 if (f->klass_name != klass_name) {
2263 break;
2264 }
2265
2266 ContextMark cm(f->field_name);
2267 archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2268 f->offset, f->field_name);
2269 }
2270 done_recording_subgraph(info->klass, klass_name);
2271 }
2272
2273 log_info(cds, heap)("Archived subgraph records = %d",
2274 _num_total_subgraph_recordings);
2275 log_info(cds, heap)(" Walked %d objects", _num_total_walked_objs);
2276 log_info(cds, heap)(" Archived %d objects", _num_total_archived_objs);
2277 log_info(cds, heap)(" Recorded %d klasses", _num_total_recorded_klasses);
2278
2279 #ifndef PRODUCT
2280 for (int i = 0; fields[i].valid(); i++) {
2281 ArchivableStaticFieldInfo* f = &fields[i];
2282 verify_subgraph_from_static_field(f->klass, f->offset);
2283 }
2284 log_info(cds, heap)(" Verified %d references", _num_total_verifications);
2285 #endif
2286 }
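
// Illustration of the grouping rule above, with hypothetical table entries:
//
//   { "com/example/A", "f1" },  // same klass: f1 and f2 are handled in one
//   { "com/example/A", "f2" },  //   start/done_recording_subgraph pass
//   { "com/example/B", "g"  },  // klass changes: a new pass begins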
2292 // [2] included in the SharedArchiveConfigFile.
2293 void HeapShared::add_to_dumped_interned_strings(oop string) {
2294 assert_at_safepoint(); // DumpedInternedStrings uses raw oops
2295 assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
2296 bool created;
2297 _dumped_interned_strings->put_if_absent(string, true, &created);
2298 if (created) {
2299 // Prevent string deduplication from changing the value field to
2300 // something not in the archive.
2301 java_lang_String::set_deduplication_forbidden(string);
2302 _dumped_interned_strings->maybe_grow();
2303 }
2304 }
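
// Sketch of the caller-side filtering implied by the assert above (the call
// site shown is hypothetical):
//
//   if (!ArchiveHeapWriter::is_string_too_large_to_archive(s)) {
//     HeapShared::add_to_dumped_interned_strings(s);
//   }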
2305
2306 bool HeapShared::is_dumped_interned_string(oop o) {
2307 return _dumped_interned_strings->get(o) != nullptr;
2308 }
2309
2310 void HeapShared::debug_trace() {
2311 ResourceMark rm;
2312 oop referrer = _object_being_archived.referrer();
2313 if (referrer != nullptr) {
2314 LogStream ls(Log(cds, heap)::error());
2315 ls.print_cr("Reference trace");
2316 CDSHeapVerifier::trace_to_root(&ls, referrer);
2317 }
2318 }
2319
2320 #ifndef PRODUCT
2321 // At dump-time, find the location of all the non-null oop pointers in an archived heap
2322 // region. This way we can quickly relocate all the pointers without using
2323 // BasicOopIterateClosure at runtime.
2324 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
2325 void* _start;
2326 BitMap *_oopmap;
2327 int _num_total_oops;
2328 int _num_null_oops;
2329 public:
2330 FindEmbeddedNonNullPointers(void* start, BitMap* oopmap)
2331 : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}
2332
2333 virtual void do_oop(narrowOop* p) {
2334 assert(UseCompressedOops, "sanity");
2335 _num_total_oops ++;
2336 narrowOop v = *p;
|