11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/archiveBuilder.hpp"
27 #include "cds/archiveHeapLoader.hpp"
28 #include "cds/archiveHeapWriter.hpp"
29 #include "cds/archiveUtils.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/cdsHeapVerifier.hpp"
32 #include "cds/heapShared.hpp"
33 #include "cds/metaspaceShared.hpp"
34 #include "classfile/classLoaderData.hpp"
35 #include "classfile/javaClasses.inline.hpp"
36 #include "classfile/modules.hpp"
37 #include "classfile/stringTable.hpp"
38 #include "classfile/symbolTable.hpp"
39 #include "classfile/systemDictionary.hpp"
40 #include "classfile/systemDictionaryShared.hpp"
41 #include "classfile/vmClasses.hpp"
42 #include "classfile/vmSymbols.hpp"
43 #include "gc/shared/collectedHeap.hpp"
44 #include "gc/shared/gcLocker.hpp"
45 #include "gc/shared/gcVMOperations.hpp"
46 #include "logging/log.hpp"
47 #include "logging/logStream.hpp"
48 #include "memory/iterator.inline.hpp"
49 #include "memory/resourceArea.hpp"
50 #include "memory/universe.hpp"
51 #include "oops/compressedOops.inline.hpp"
65 #include "gc/g1/g1CollectedHeap.hpp"
66 #endif
67
68 #if INCLUDE_CDS_JAVA_HEAP
69
70 struct ArchivableStaticFieldInfo {
71 const char* klass_name;
72 const char* field_name;
73 InstanceKlass* klass;
74 int offset;
75 BasicType type;
76
77 ArchivableStaticFieldInfo(const char* k, const char* f)
78 : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}
79
80 bool valid() {
81 return klass_name != nullptr;
82 }
83 };
84
// When set, no archived heap will be written (presumably consulted by
// HeapShared::can_write() — confirm against the class declaration).
bool HeapShared::_disable_writing = false;
// Interned strings collected for inclusion in the archived heap.
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

// Allocation statistics for archived objects, reported by print_stats().
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
// Support for the -XX:ArchiveHeapTestClass testing flag (non-product only).
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static const Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif
100
101
102 //
103 // If you add new entries to the following tables, you should know what you're doing!
104 //
105
// Static fields that root the archived object subgraphs. Each table is
// terminated by a {nullptr, nullptr} sentinel (see
// ArchivableStaticFieldInfo::valid()).
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",              "archivedCache"},
  {"java/lang/Long$LongCache",                    "archivedCache"},
  {"java/lang/Byte$ByteCache",                    "archivedCache"},
  {"java/lang/Short$ShortCache",                  "archivedCache"},
  {"java/lang/Character$CharacterCache",          "archivedCache"},
  {"java/util/jar/Attributes$Name",               "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",                  "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph",     "archivedModuleGraph"},
  {"java/util/ImmutableCollections",              "archivedObjects"},
  {"java/lang/ModuleLayer",                       "EMPTY_LAYER"},
  {"java/lang/module/Configuration",              "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",              "archivedCaches"},
#ifndef PRODUCT
  // Extra slot that is filled in when -XX:ArchiveHeapTestClass is specified.
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};

// full module graph
// Entry fields that are only archived when the full module graph is enabled.
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS,                     ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData",               "archivedData"},
  {nullptr, nullptr},
};
132
// Subgraph info (keyed by java.lang.Object) for objects that don't belong to
// any class-specific subgraph; see archive_objects() below.
KlassSubGraphInfo* HeapShared::_default_subgraph_info;
// Roots accumulated during dumping; handed to ArchiveHeapWriter::write().
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
// Runtime handle to the archived roots array; see HeapShared::roots().
OopHandle HeapShared::_roots;
// Scratch mirrors for the primitive types, created in init_scratch_objects().
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;
139
140 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
141 for (int i = 0; fields[i].valid(); i++) {
142 if (fields[i].klass == ik) {
143 return true;
144 }
145 }
146 return false;
147 }
148
149 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
150 return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
151 is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
152 }
153
154 unsigned HeapShared::oop_hash(oop const& p) {
155 // Do not call p->identity_hash() as that will update the
156 // object header.
157 return primitive_hash(cast_from_oop<intptr_t>(p));
158 }
208
209 bool HeapShared::has_been_archived(oop obj) {
210 assert(CDSConfig::is_dumping_heap(), "dump-time only");
211 return archived_object_cache()->get(obj) != nullptr;
212 }
213
214 int HeapShared::append_root(oop obj) {
215 assert(CDSConfig::is_dumping_heap(), "dump-time only");
216
217 // No GC should happen since we aren't scanning _pending_roots.
218 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
219
220 if (_pending_roots == nullptr) {
221 _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
222 }
223
224 return _pending_roots->append(obj);
225 }
226
227 objArrayOop HeapShared::roots() {
228 if (CDSConfig::is_dumping_heap()) {
229 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
230 if (!HeapShared::can_write()) {
231 return nullptr;
232 }
233 } else {
234 assert(UseSharedSpaces, "must be");
235 }
236
237 objArrayOop roots = (objArrayOop)_roots.resolve();
238 assert(roots != nullptr, "should have been initialized");
239 return roots;
240 }
241
// Returns the archived root object at the given index; when clear is true,
// the slot is also cleared afterwards so the object can be GC'ed.
// (The whole roots array itself is returned by roots() above.)
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  assert(!CDSConfig::is_dumping_heap() && UseSharedSpaces, "runtime only");
  assert(!_roots.is_empty(), "must have loaded shared heap");
  oop result = roots()->obj_at(index);
  if (clear) {
    clear_root(index);
  }
  return result;
}
253
254 void HeapShared::clear_root(int index) {
255 assert(index >= 0, "sanity");
256 assert(UseSharedSpaces, "must be");
257 if (ArchiveHeapLoader::is_in_use()) {
258 if (log_is_enabled(Debug, cds, heap)) {
259 oop old = roots()->obj_at(index);
260 log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
261 }
311 return nullptr;
312 }
313 }
  // Associates ptr with o via a freshly-created global OopHandle.
  // Must be called at most once per ptr (asserted below).
  void set_oop(MetaspaceObj* ptr, oop o) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle handle(Universe::vm_global(), o);
    bool is_new = put(ptr, handle);
    assert(is_new, "cannot set twice");
  }
320 void remove_oop(MetaspaceObj* ptr) {
321 MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
322 OopHandle* handle = get(ptr);
323 if (handle != nullptr) {
324 handle->release(Universe::vm_global());
325 remove(ptr);
326 }
327 }
328 };
329
// Records dest as the scratch copy of src's resolved_references array.
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  _scratch_references_table->set_oop(src, dest);
}

// Returns the scratch resolved_references array recorded for src, or nullptr.
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_references_table->get_oop(src);
}
337
338 void HeapShared::init_scratch_objects(TRAPS) {
339 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
340 BasicType bt = (BasicType)i;
341 if (!is_reference_type(bt)) {
342 oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
343 _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
344 }
345 }
346 _scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
347 _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
348 }
349
// Returns the scratch mirror created by init_scratch_objects() for the
// given primitive type.
oop HeapShared::scratch_java_mirror(BasicType t) {
  assert((uint)t < T_VOID+1, "range check");
  assert(!is_reference_type(t), "sanity");
  return _scratch_basic_type_mirrors[t].resolve();
}

// Returns the scratch mirror recorded for k, or nullptr if none.
oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_java_mirror_table->get_oop(k);
}

// Records mirror as the scratch mirror for k (asserts if set twice).
void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  _scratch_java_mirror_table->set_oop(k, mirror);
}
363
364 void HeapShared::remove_scratch_objects(Klass* k) {
365 _scratch_java_mirror_table->remove_oop(k);
366 if (k->is_instance_klass()) {
367 _scratch_references_table->remove(InstanceKlass::cast(k)->constants());
368 }
369 }
370
371 void HeapShared::archive_java_mirrors() {
372 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
373 BasicType bt = (BasicType)i;
374 if (!is_reference_type(bt)) {
375 oop m = _scratch_basic_type_mirrors[i].resolve();
376 assert(m != nullptr, "sanity");
377 bool success = archive_reachable_objects_from(1, _default_subgraph_info, m);
378 assert(success, "sanity");
379
380 log_trace(cds, heap, mirror)(
381 "Archived %s mirror object from " PTR_FORMAT,
382 type2name(bt), p2i(m));
383
384 Universe::set_archived_basic_type_mirror_index(bt, append_root(m));
385 }
386 }
387
388 GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
389 assert(klasses != nullptr, "sanity");
390 for (int i = 0; i < klasses->length(); i++) {
391 Klass* orig_k = klasses->at(i);
392 oop m = scratch_java_mirror(orig_k);
393 if (m != nullptr) {
394 Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
395 bool success = archive_reachable_objects_from(1, _default_subgraph_info, m);
396 guarantee(success, "scratch mirrors must point to only archivable objects");
397 buffered_k->set_archived_java_mirror(append_root(m));
398 ResourceMark rm;
399 log_trace(cds, heap, mirror)(
400 "Archived %s mirror object from " PTR_FORMAT,
401 buffered_k->external_name(), p2i(m));
402
403 // archive the resolved_referenes array
404 if (buffered_k->is_instance_klass()) {
405 InstanceKlass* ik = InstanceKlass::cast(buffered_k);
406 oop rr = ik->constants()->prepare_resolved_references_for_archiving();
407 if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
408 bool success = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr);
409 assert(success, "must be");
410 int root_index = append_root(rr);
411 ik->constants()->cache()->set_archived_references(root_index);
412 }
413 }
414 }
415 }
418 void HeapShared::archive_strings() {
419 oop shared_strings_array = StringTable::init_shared_table(_dumped_interned_strings);
420 bool success = archive_reachable_objects_from(1, _default_subgraph_info, shared_strings_array);
421 // We must succeed because:
422 // - _dumped_interned_strings do not contain any large strings.
423 // - StringTable::init_shared_table() doesn't create any large arrays.
424 assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
425 StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
426 }
427
// Archives a pre-allocated exception instance and returns its root index.
int HeapShared::archive_exception_instance(oop exception) {
  bool success = archive_reachable_objects_from(1, _default_subgraph_info, exception);
  assert(success, "sanity");
  return append_root(exception);
}
433
// Marks the fields of orig_obj that hold native (metadata) pointers so the
// writer can relocate them. Only java.lang.Class instances are handled here:
// their Klass* and array-Klass* fields.
void HeapShared::mark_native_pointers(oop orig_obj) {
  if (java_lang_Class::is_instance(orig_obj)) {
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
  }
}
440
// Reports (via the out-parameters) whether the cached copy of src_obj
// contains oop pointers and/or native pointers. src_obj must already be in
// the archived object cache.
void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  has_oop_pointers = info->has_oop_pointers();
  has_native_pointers = info->has_native_pointers();
}

// Flags the cached copy of src_obj as containing native pointers.
void HeapShared::set_has_native_pointers(oop src_obj) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}
453
// -- Handling of Enum objects
// Java Enum classes have synthetic <clinit> methods that look like this
//     enum MyEnum {FOO, BAR}
//     MyEnum::<clinit> {
//         /*static final MyEnum*/ MyEnum::FOO = new MyEnum("FOO");
//         /*static final MyEnum*/ MyEnum::BAR = new MyEnum("BAR");
//     }
//
// If the MyEnum::FOO object is referenced by any of the archived subgraphs, we must
// ensure the archived value is equal (in object address) to the runtime value of
// MyEnum::FOO.
//
// However, since MyEnum::<clinit> is synthetically generated by javac, there's
// no way of programmatically handling this inside the Java code (as you would handle
// ModuleLayer::EMPTY_LAYER, for example).
//
// Instead, we archive all static fields of such Enum classes. At runtime,
// HeapShared::initialize_enum_klass() will skip the <clinit> method and pull
// the static fields out of the archived heap.
// Archives all static oop fields of an Enum class the first time one of its
// constants is reached in a subgraph (see the comment block above).
void HeapShared::check_enum_obj(int level,
                                KlassSubGraphInfo* subgraph_info,
                                oop orig_obj) {
  assert(level > 1, "must never be called at the first (outermost) level");
  Klass* k = orig_obj->klass();
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
  if (!k->is_instance_klass()) {
    return;
  }
  InstanceKlass* ik = InstanceKlass::cast(k);
  // Only direct subclasses of java.lang.Enum are handled, and each class
  // only once (the has_archived_enum_objs flag is set below).
  if (ik->java_super() == vmClasses::Enum_klass() && !ik->has_archived_enum_objs()) {
    ResourceMark rm;
    // Mark both the original and the buffered copy so neither dump-time nor
    // run-time processing repeats this work.
    ik->set_has_archived_enum_objs();
    buffered_k->set_has_archived_enum_objs();
    oop mirror = ik->java_mirror();

    for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
      if (fs.access_flags().is_static()) {
        fieldDescriptor& fd = fs.field_descriptor();
        // Every static field must be a non-null reference of the enum type
        // itself (or an array of it); anything else is a fatal dump error.
        if (fd.field_type() != T_OBJECT && fd.field_type() != T_ARRAY) {
          guarantee(false, "static field %s::%s must be T_OBJECT or T_ARRAY",
                    ik->external_name(), fd.name()->as_C_string());
        }
        oop oop_field = mirror->obj_field(fd.offset());
        if (oop_field == nullptr) {
          guarantee(false, "static field %s::%s must not be null",
                    ik->external_name(), fd.name()->as_C_string());
        } else if (oop_field->klass() != ik && oop_field->klass() != ik->array_klass_or_null()) {
          guarantee(false, "static field %s::%s is of the wrong type",
                    ik->external_name(), fd.name()->as_C_string());
        }
        bool success = archive_reachable_objects_from(level, subgraph_info, oop_field);
        assert(success, "VM should have exited with unarchivable objects for _level > 1");
        // Record the root index so initialize_enum_klass() can restore this
        // field at runtime without running <clinit>.
        int root_index = append_root(oop_field);
        log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT ")",
                            root_index, ik->external_name(), fd.name()->as_C_string(),
                            p2i((oopDesc*)oop_field));
        SystemDictionaryShared::add_enum_klass_static_field(ik, root_index);
      }
    }
  }
}
515
516 // See comments in HeapShared::check_enum_obj()
517 bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) {
518 if (!ArchiveHeapLoader::is_in_use()) {
519 return false;
520 }
521
522 RunTimeClassInfo* info = RunTimeClassInfo::get_for(k);
523 assert(info != nullptr, "sanity");
524
525 if (log_is_enabled(Info, cds, heap)) {
526 ResourceMark rm;
527 log_info(cds, heap)("Initializing Enum class: %s", k->external_name());
528 }
529
530 oop mirror = k->java_mirror();
531 int i = 0;
532 for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
533 if (fs.access_flags().is_static()) {
534 int root_index = info->enum_klass_static_field_root_index_at(i++);
535 fieldDescriptor& fd = fs.field_descriptor();
536 assert(fd.field_type() == T_OBJECT || fd.field_type() == T_ARRAY, "must be");
537 mirror->obj_field_put(fd.offset(), get_root(root_index, /*clear=*/true));
538 }
539 }
540 return true;
541 }
542
// Top-level driver for dumping the archived heap: copies all archivable
// objects, verifies them, and writes them out via ArchiveHeapWriter.
void HeapShared::archive_objects(ArchiveHeapInfo *heap_info) {
  {
    // Raw oops are held (e.g. in _pending_roots) while copying, so no
    // safepoint/GC may occur inside this scope.
    NoSafepointVerifier nsv;

    // The "default" subgraph (keyed by java.lang.Object) holds objects such
    // as mirrors and strings that belong to no class-specific subgraph.
    _default_subgraph_info = init_subgraph_info(vmClasses::Object_klass(), false);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    // With compressed oops, log the compressed-oops range; otherwise the
    // G1 reserved range is used (note the G1-specific calls below).
    log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                  UseCompressedOops ? p2i(CompressedOops::begin()) :
                                      p2i((address)G1CollectedHeap::heap()->reserved().start()),
                  UseCompressedOops ? p2i(CompressedOops::end()) :
                                      p2i((address)G1CollectedHeap::heap()->reserved().end()));
    copy_objects();

    CDSHeapVerifier::verify();
    check_default_subgraph_classes();
  }

  ArchiveHeapWriter::write(_pending_roots, heap_info);
}
565
566 void HeapShared::copy_interned_strings() {
567 init_seen_objects_table();
// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);

  if (_subgraph_object_klasses == nullptr) {
    _subgraph_object_klasses =
      new (mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_k), "must be a shared class");

  if (_k == buffered_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (buffered_k->is_instance_klass()) {
    assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class(),
          "must be boot class");
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
    check_allowed_klass(InstanceKlass::cast(orig_k));
  } else if (buffered_k->is_objArray_klass()) {
    // For object arrays, the constraint applies to the bottom element class.
    Klass* abk = ObjArrayKlass::cast(buffered_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
          "must be boot class");
      check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
    }
    if (buffered_k == Universe::objectArrayKlass()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(buffered_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(buffered_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  // append_if_missing keeps the list duplicate-free.
  _subgraph_object_klasses->append_if_missing(buffered_k);
  // Track whether any recorded klass is "non-early" (see is_non_early_klass()).
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}
705
// Dump-time policy check: classes whose instances go into the archived heap
// must belong to java.base (or, in non-product builds, be an unnamed-package
// class loaded for -XX:ArchiveHeapTestClass). Any other class aborts the dump.
void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
  if (ik->module()->name() == vmSymbols::java_base()) {
    assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
    return;
  }

#ifndef PRODUCT
  if (!ik->module()->is_named() && ik->package() == nullptr) {
    // This class is loaded by ArchiveHeapTestClass
    return;
  }
  const char* extra_msg = ", or in an unnamed package of an unnamed module";
#else
  const char* extra_msg = "";
#endif

  // Not allowed: report and abort the archive dump.
  ResourceMark rm;
  log_error(cds, heap)("Class %s not allowed in archive heap. Must be in java.base%s",
                       ik->external_name(), extra_msg);
  MetaspaceShared::unrecoverable_writing_error();
}
784 _subgraph_object_klasses =
785 ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
786 for (int i = 0; i < num_subgraphs_klasses; i++) {
787 Klass* subgraph_k = subgraph_object_klasses->at(i);
788 if (log_is_enabled(Info, cds, heap)) {
789 ResourceMark rm;
790 log_info(cds, heap)(
791 "Archived object klass %s (%2d) => %s",
792 _k->external_name(), i, subgraph_k->external_name());
793 }
794 _subgraph_object_klasses->at_put(i, subgraph_k);
795 ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
796 }
797 }
798
799 ArchivePtrMarker::mark_pointer(&_k);
800 ArchivePtrMarker::mark_pointer(&_entry_field_records);
801 ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
802 }
803
804 struct CopyKlassSubGraphInfoToArchive : StackObj {
805 CompactHashtableWriter* _writer;
806 CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
807
808 bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
809 if (info.subgraph_object_klasses() != nullptr || info.subgraph_entry_fields() != nullptr) {
810 ArchivedKlassSubGraphInfoRecord* record =
811 (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
812 record->init(&info);
813
814 Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
815 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
816 u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
817 _writer->add(hash, delta);
818 }
819 return true; // keep on iterating
820 }
821 };
822
823 // Build the records of archived subgraph infos, which include:
824 // - Entry points to all subgraphs from the containing class mirror. The entry
825 // points are static fields in the mirror. For each entry point, the field
826 // offset, and value are recorded in the sub-graph
827 // info. The value is stored back to the corresponding field at runtime.
828 // - A list of klasses that need to be loaded/initialized before archived
829 // java object sub-graph can be accessed at runtime.
830 void HeapShared::write_subgraph_info_table() {
831 // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
832 DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
833 CompactHashtableStats stats;
834
835 _run_time_subgraph_info_table.reset();
836
837 CompactHashtableWriter writer(d_table->_count, &stats);
838 CopyKlassSubGraphInfoToArchive copy(&writer);
839 d_table->iterate(©);
840 writer.dump(&_run_time_subgraph_info_table, "subgraphs");
841
842 #ifndef PRODUCT
843 if (ArchiveHeapTestClass != nullptr) {
844 size_t len = strlen(ArchiveHeapTestClass) + 1;
845 Array<char>* array = ArchiveBuilder::new_ro_array<char>((int)len);
846 strncpy(array->adr_at(0), ArchiveHeapTestClass, len);
847 _archived_ArchiveHeapTestClass = array;
848 }
849 #endif
850 if (log_is_enabled(Info, cds, heap)) {
851 print_stats();
852 }
853 }
854
// Records the handle to the archived roots array once the archived heap has
// been loaded; a nullptr argument leaves _roots untouched.
void HeapShared::init_roots(oop roots_oop) {
  if (roots_oop != nullptr) {
    assert(ArchiveHeapLoader::is_in_use(), "must be");
    _roots = OopHandle(Universe::vm_global(), roots_oop);
  }
}

// Serializes (writes at dump time, reads back at runtime) the table headers
// stored in the RO region of the archive.
void HeapShared::serialize_tables(SerializeClosure* soc) {

#ifndef PRODUCT
  soc->do_ptr(&_archived_ArchiveHeapTestClass);
  if (soc->reading() && _archived_ArchiveHeapTestClass != nullptr) {
    // The archive was dumped with -XX:ArchiveHeapTestClass; arrange for the
    // same test class to be recognized at runtime.
    _test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
    setup_test_class(_test_class_name);
  }
#endif

  _run_time_subgraph_info_table.serialize_header(soc);
}
874
// With -XX:VerifyArchivedFields >= 1, runs a full heap verification around
// the initialization of k's archived static fields; with >= 2 it also forces
// a GC. "which" is a short tag (e.g. "before") used only in log messages.
static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 forces a GC to happen in such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      // Temporarily enable all GC verification for this collection.
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC, true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}
901
902 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
903 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
904 //
905 // Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
906 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
907 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
908 void HeapShared::resolve_classes(JavaThread* current) {
909 assert(UseSharedSpaces, "runtime only!");
910 if (!ArchiveHeapLoader::is_in_use()) {
911 return; // nothing to do
912 }
913 resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
914 resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
915 }
916
917 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
918 for (int i = 0; fields[i].valid(); i++) {
919 ArchivableStaticFieldInfo* info = &fields[i];
920 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
921 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
922 assert(k != nullptr && k->is_shared_boot_class(), "sanity");
923 resolve_classes_for_subgraph_of(current, k);
924 }
925 }
926
// Resolves the classes of the subgraph rooted at k, swallowing any pending
// exception. If resolution fails (record == nullptr), the subgraph's archived
// roots are cleared so the normal (non-archived) initialization path is used.
void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
   resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == nullptr) {
    clear_archived_roots_of(k);
  }
}
939
940 void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
941 JavaThread* THREAD = current;
942 if (!ArchiveHeapLoader::is_in_use()) {
943 return; // nothing to do
944 }
945
946 ExceptionMark em(THREAD);
947 const ArchivedKlassSubGraphInfoRecord* record =
948 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
949
950 if (HAS_PENDING_EXCEPTION) {
951 CLEAR_PENDING_EXCEPTION;
952 // None of the field value will be set if there was an exception when initializing the classes.
953 // The java code will not see any of the archived objects in the
954 // subgraphs referenced from k in this case.
955 return;
956 }
957
958 if (record != nullptr) {
959 init_archived_fields_for(k, record);
993 log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
994 k->external_name());
995 }
996 return nullptr;
997 }
998
999 if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
1000 if (log_is_enabled(Info, cds, heap)) {
1001 ResourceMark rm(THREAD);
1002 log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
1003 k->external_name());
1004 }
1005 return nullptr;
1006 }
1007
1008 if (log_is_enabled(Info, cds, heap)) {
1009 ResourceMark rm;
1010 log_info(cds, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
1011 }
1012
1013 resolve_or_init(k, do_init, CHECK_NULL);
1014
1015 // Load/link/initialize the klasses of the objects in the subgraph.
1016 // nullptr class loader is used.
1017 Array<Klass*>* klasses = record->subgraph_object_klasses();
1018 if (klasses != nullptr) {
1019 for (int i = 0; i < klasses->length(); i++) {
1020 Klass* klass = klasses->at(i);
1021 if (!klass->is_shared()) {
1022 return nullptr;
1023 }
1024 resolve_or_init(klass, do_init, CHECK_NULL);
1025 }
1026 }
1027 }
1028
1029 return record;
1030 }
1031
// Resolves k (do_init == false) or initializes it (do_init == true).
// Resolution asserts that the resolved class is the archived one, i.e. that
// it was not replaced via JVMTI ClassFileLoadHook.
void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    // A null class_loader_data means k has not been resolved in this VM yet.
    if (k->class_loader_data() == nullptr) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}
1049
1050 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1051 verify_the_heap(k, "before");
1081 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
1082 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
1083 if (record != nullptr) {
1084 Array<int>* entry_field_records = record->entry_field_records();
1085 if (entry_field_records != nullptr) {
1086 int efr_len = entry_field_records->length();
1087 assert(efr_len % 2 == 0, "sanity");
1088 for (int i = 0; i < efr_len; i += 2) {
1089 int root_index = entry_field_records->at(i+1);
1090 clear_root(root_index);
1091 }
1092 }
1093 }
1094 }
1095
1096 class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
1097 int _level;
1098 bool _record_klasses_only;
1099 KlassSubGraphInfo* _subgraph_info;
1100 oop _referencing_obj;
1101
1102 // The following are for maintaining a stack for determining
1103 // CachedOopInfo::_referrer
1104 static WalkOopAndArchiveClosure* _current;
1105 WalkOopAndArchiveClosure* _last;
1106 public:
1107 WalkOopAndArchiveClosure(int level,
1108 bool record_klasses_only,
1109 KlassSubGraphInfo* subgraph_info,
1110 oop orig) :
1111 _level(level),
1112 _record_klasses_only(record_klasses_only),
1113 _subgraph_info(subgraph_info),
1114 _referencing_obj(orig) {
1115 _last = _current;
1116 _current = this;
1117 }
1118 ~WalkOopAndArchiveClosure() {
1119 _current = _last;
1120 }
1121 void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
1122 void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
1123
1124 protected:
1125 template <class T> void do_oop_work(T *p) {
1126 oop obj = RawAccess<>::oop_load(p);
1127 if (!CompressedOops::is_null(obj)) {
1128 size_t field_delta = pointer_delta(p, _referencing_obj, sizeof(char));
1129
1130 if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
1131 ResourceMark rm;
1132 log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level,
1133 _referencing_obj->klass()->external_name(), field_delta,
1134 p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
1135 if (log_is_enabled(Trace, cds, heap)) {
1136 LogTarget(Trace, cds, heap) log;
1137 LogStream out(log);
1138 obj->print_on(&out);
1139 }
1140 }
1141
1142 bool success = HeapShared::archive_reachable_objects_from(
1143 _level + 1, _subgraph_info, obj);
1144 assert(success, "VM should have exited with unarchivable objects for _level > 1");
1145 }
1146 }
1147
1148 public:
1149 static WalkOopAndArchiveClosure* current() { return _current; }
1159
  // OR into _result whether the reference field at *p holds a non-null oop.
  template <class T> void check(T *p) {
    _result |= (HeapAccess<>::oop_load(p) != nullptr);
  }

 public:
  PointsToOopsChecker() : _result(false) {}
  void do_oop(narrowOop *p) { check(p); }
  void do_oop(      oop *p) { check(p); }
  // True iff any visited reference field was non-null.
  bool result() { return _result; }
1169 };
1170
1171 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj) {
1172 WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
1173 oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
1174 PointsToOopsChecker points_to_oops_checker;
1175 obj->oop_iterate(&points_to_oops_checker);
1176 return CachedOopInfo(referrer, points_to_oops_checker.result());
1177 }
1178
1179 // (1) If orig_obj has not been archived yet, archive it.
1180 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
1181 // trace all objects that are reachable from it, and make sure these objects are archived.
1182 // (3) Record the klasses of all orig_obj and all reachable objects.
// Archive orig_obj and everything transitively reachable from it, recording
// the klasses of all visited objects in subgraph_info. Returns false only
// when a level-1 (subgraph root) object could not be archived; deeper
// failures are fatal. See the numbered comments above for the full contract.
bool HeapShared::archive_reachable_objects_from(int level,
                                                KlassSubGraphInfo* subgraph_info,
                                                oop orig_obj) {
  assert(orig_obj != nullptr, "must be");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    MetaspaceShared::unrecoverable_writing_error();
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj) && subgraph_info != _default_subgraph_info) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    MetaspaceShared::unrecoverable_writing_error();
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return true;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool already_archived = has_been_archived(orig_obj);
  bool record_klasses_only = already_archived;
  if (!already_archived) {
    ++_num_new_archived_objs;
    if (!archive_object(orig_obj)) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archives static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return false;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        MetaspaceShared::unrecoverable_writing_error();
      }
    }
  }

  // Record the klass even when the object itself was already archived, so the
  // subgraph's klass list is complete.
  Klass *orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  // Recurse into every oop field of orig_obj (see WalkOopAndArchiveClosure).
  WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj);
  orig_obj->oop_iterate(&walker);

  check_enum_obj(level + 1, subgraph_info, orig_obj);
  return true;
}
1247
1248 //
1249 // Start from the given static field in a java mirror and archive the
1250 // complete sub-graph of java heap objects that are reached directly
1251 // or indirectly from the starting object by following references.
1252 // Sub-graph archiving restrictions (current):
1253 //
1254 // - All classes of objects in the archived sub-graph (including the
1255 // entry class) must be boot class only.
1256 // - No java.lang.Class instance (java mirror) can be included inside
1257 // an archived sub-graph. Mirror can only be the sub-graph entry object.
1258 //
1259 // The Java heap object sub-graph archiving process (see
1260 // WalkOopAndArchiveClosure):
1261 //
1262 // 1) Java object sub-graph archiving starts from a given static field
1263 // within a Class instance (java mirror). If the static field is a
1264 // reference field and points to a non-null java object, proceed to
1342 if (!CompressedOops::is_null(f)) {
1343 verify_subgraph_from(f);
1344 }
1345 }
1346
1347 void HeapShared::verify_subgraph_from(oop orig_obj) {
1348 if (!has_been_archived(orig_obj)) {
1349 // It's OK for the root of a subgraph to be not archived. See comments in
1350 // archive_reachable_objects_from().
1351 return;
1352 }
1353
1354 // Verify that all objects reachable from orig_obj are archived.
1355 init_seen_objects_table();
1356 verify_reachable_objects_from(orig_obj);
1357 delete_seen_objects_table();
1358 }
1359
1360 void HeapShared::verify_reachable_objects_from(oop obj) {
1361 _num_total_verifications ++;
1362 if (!has_been_seen_during_subgraph_recording(obj)) {
1363 set_has_been_seen_during_subgraph_recording(obj);
1364 assert(has_been_archived(obj), "must be");
1365 VerifySharedOopClosure walker;
1366 obj->oop_iterate(&walker);
1367 }
1368 }
1369 #endif
1370
1371 // The "default subgraph" contains special objects (see heapShared.hpp) that
1372 // can be accessed before we load any Java classes (including java/lang/Class).
1373 // Make sure that these are only instances of the very few specific types
1374 // that we can handle.
void HeapShared::check_default_subgraph_classes() {
  GrowableArray<Klass*>* klasses = _default_subgraph_info->subgraph_object_klasses();
  int num = klasses->length();
  for (int i = 0; i < num; i++) {
    Klass* subgraph_k = klasses->at(i);
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)(
          "Archived object klass (default subgraph %d) => %s",
          i, subgraph_k->external_name());
    }

    // Map the (possibly buffered) klass name back to its source address so it
    // can be compared against the vmSymbols entries below.
    Symbol* name = ArchiveBuilder::current()->get_source_addr(subgraph_k->name());
    guarantee(name == vmSymbols::java_lang_Class() ||
              name == vmSymbols::java_lang_String() ||
              name == vmSymbols::java_lang_ArithmeticException() ||
              name == vmSymbols::java_lang_NullPointerException() ||
              name == vmSymbols::java_lang_InternalError() ||
              name == vmSymbols::object_array_signature() ||
              name == vmSymbols::byte_array_signature() ||
              name == vmSymbols::char_array_signature(),
              "default subgraph can have only these objects");
  }
}
1399
// Table of objects visited since the last init_seen_objects_table() call,
// plus per-subgraph and cumulative recording statistics.
HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

// Cumulative totals, reset at the start of archive_object_subgraphs().
int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;
1410
1411 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
1412 return _seen_objects_table->get(obj) != nullptr;
1413 }
1414
1415 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
1416 assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
1462 }
1463 }
1464 }
1465 bool found() { return _found; }
1466 int offset() { return _offset; }
1467 };
1468
1469 void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
1470 TRAPS) {
1471 for (int i = 0; fields[i].valid(); i++) {
1472 ArchivableStaticFieldInfo* info = &fields[i];
1473 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1474 TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);
1475 ResourceMark rm; // for stringStream::as_string() etc.
1476
1477 #ifndef PRODUCT
1478 bool is_test_class = (ArchiveHeapTestClass != nullptr) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
1479 const char* test_class_name = ArchiveHeapTestClass;
1480 #else
1481 bool is_test_class = false;
1482 const char* test_class_name = ""; // avoid C++ printf checks warnings.
1483 #endif
1484
1485 if (is_test_class) {
1486 log_warning(cds)("Loading ArchiveHeapTestClass %s ...", test_class_name);
1487 }
1488
1489 Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, THREAD);
1490 if (HAS_PENDING_EXCEPTION) {
1491 CLEAR_PENDING_EXCEPTION;
1492 stringStream st;
1493 st.print("Fail to initialize archive heap: %s cannot be loaded by the boot loader", info->klass_name);
1494 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1495 }
1496
1497 if (!k->is_instance_klass()) {
1498 stringStream st;
1499 st.print("Fail to initialize archive heap: %s is not an instance class", info->klass_name);
1500 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1501 }
1502
1629 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
1630 bool is_full_module_graph) {
1631 _num_total_subgraph_recordings = 0;
1632 _num_total_walked_objs = 0;
1633 _num_total_archived_objs = 0;
1634 _num_total_recorded_klasses = 0;
1635 _num_total_verifications = 0;
1636
1637 // For each class X that has one or more archived fields:
1638 // [1] Dump the subgraph of each archived field
1639 // [2] Create a list of all the class of the objects that can be reached
1640 // by any of these static fields.
1641 // At runtime, these classes are initialized before X's archived fields
1642 // are restored by HeapShared::initialize_from_archived_subgraph().
1643 int i;
1644 for (int i = 0; fields[i].valid(); ) {
1645 ArchivableStaticFieldInfo* info = &fields[i];
1646 const char* klass_name = info->klass_name;
1647 start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
1648
1649 // If you have specified consecutive fields of the same klass in
1650 // fields[], these will be archived in the same
1651 // {start_recording_subgraph ... done_recording_subgraph} pass to
1652 // save time.
1653 for (; fields[i].valid(); i++) {
1654 ArchivableStaticFieldInfo* f = &fields[i];
1655 if (f->klass_name != klass_name) {
1656 break;
1657 }
1658
1659 archive_reachable_objects_from_static_field(f->klass, f->klass_name,
1660 f->offset, f->field_name);
1661 }
1662 done_recording_subgraph(info->klass, klass_name);
1663 }
1664
1665 log_info(cds, heap)("Archived subgraph records = %d",
1666 _num_total_subgraph_recordings);
1667 log_info(cds, heap)(" Walked %d objects", _num_total_walked_objs);
1668 log_info(cds, heap)(" Archived %d objects", _num_total_archived_objs);
1669 log_info(cds, heap)(" Recorded %d klasses", _num_total_recorded_klasses);
1670
1671 #ifndef PRODUCT
1672 for (int i = 0; fields[i].valid(); i++) {
1673 ArchivableStaticFieldInfo* f = &fields[i];
1674 verify_subgraph_from_static_field(f->klass, f->offset);
1675 }
1676 log_info(cds, heap)(" Verified %d references", _num_total_verifications);
1677 #endif
1678 }
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/archiveBuilder.hpp"
27 #include "cds/archiveHeapLoader.hpp"
28 #include "cds/archiveHeapWriter.hpp"
29 #include "cds/archiveUtils.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/cdsEnumKlass.hpp"
32 #include "cds/cdsHeapVerifier.hpp"
33 #include "cds/classPreinitializer.hpp"
34 #include "cds/heapShared.hpp"
35 #include "cds/metaspaceShared.hpp"
36 #include "classfile/classLoaderData.hpp"
37 #include "classfile/javaClasses.inline.hpp"
38 #include "classfile/modules.hpp"
39 #include "classfile/stringTable.hpp"
40 #include "classfile/symbolTable.hpp"
41 #include "classfile/systemDictionary.hpp"
42 #include "classfile/systemDictionaryShared.hpp"
43 #include "classfile/vmClasses.hpp"
44 #include "classfile/vmSymbols.hpp"
45 #include "gc/shared/collectedHeap.hpp"
46 #include "gc/shared/gcLocker.hpp"
47 #include "gc/shared/gcVMOperations.hpp"
48 #include "logging/log.hpp"
49 #include "logging/logStream.hpp"
50 #include "memory/iterator.inline.hpp"
51 #include "memory/resourceArea.hpp"
52 #include "memory/universe.hpp"
53 #include "oops/compressedOops.inline.hpp"
67 #include "gc/g1/g1CollectedHeap.hpp"
68 #endif
69
70 #if INCLUDE_CDS_JAVA_HEAP
71
// Describes one static field that serves as the root of an archived object
// subgraph. Only klass_name/field_name are given statically; klass, offset
// and type are filled in at dump time (see init_subgraph_entry_fields()).
struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  // The tables below are terminated by an entry with a null klass_name.
  bool valid() {
    return klass_name != nullptr;
  }
};
86
// RAII helper: keeps obj on the HeapShared::_trace stack for the duration of
// the enclosing scope.
class HeapShared::ArchivingObjectMark : public StackObj {
public:
  ArchivingObjectMark(oop obj) {
    _trace->push(obj);
  }
  ~ArchivingObjectMark() {
    _trace->pop();
  }
};
96
// RAII helper: keeps the string c on the HeapShared::_context stack for the
// duration of the enclosing scope. The embedded ResourceMark bounds any
// resource-area allocations made while the context is active.
class HeapShared::ContextMark : public StackObj {
  ResourceMark rm;
public:
  ContextMark(const char* c) : rm{} {
    _context->push(c);
  }
  ~ContextMark() {
    _context->pop();
  }
};
107
bool HeapShared::_disable_writing = false;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

// Per-slot allocation statistics for archived objects.
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
// Support for -XX:ArchiveHeapTestClass (non-product builds only).
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static const Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif
123
124
125 //
126 // If you add new entries to the following tables, you should know what you're doing!
127 //
128
// Static fields whose subgraphs are archived unconditionally.
// Each entry is {class name, static field name}; terminated by a null entry.
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Boolean", "archivedCache"},
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  {"java/lang/Long$LongCache", "archivedCache"},
  {"java/lang/Byte$ByteCache", "archivedCache"},
  {"java/lang/Short$ShortCache", "archivedCache"},
  {"java/lang/Character$CharacterCache", "archivedCache"},
  {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale", "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
  {"java/util/ImmutableCollections", "archivedObjects"},
  {"java/lang/ModuleLayer", "EMPTY_LAYER"},
  {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger", "archivedCaches"},
  {"java/lang/invoke/DirectMethodHandle", "archivedObjects"}, // FIXME -- requires PreloadSharedClasses
  {"java/lang/invoke/MethodType", "archivedObjects"}, // FIXME -- requires PreloadSharedClasses
  {"java/lang/invoke/LambdaForm$NamedFunction", "archivedObjects"}, // FIXME -- requires PreloadSharedClasses
  {"java/lang/reflect/Proxy$ProxyBuilder", "archivedData"}, // FIXME -- requires PreloadSharedClasses
#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};

// Static fields that are archived only when the full module graph is archived.
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData", "archivedData"},
  {nullptr, nullptr},
};
160
// Dump-time and runtime state for the "default subgraph" and the roots array.
KlassSubGraphInfo* HeapShared::_default_subgraph_info;
ArchivedKlassSubGraphInfoRecord* HeapShared::_runtime_default_subgraph_info;
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_trace = nullptr;
GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
OopHandle HeapShared::_roots;
// Scratch mirrors for the primitive types, indexed by BasicType.
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;
int HeapShared::_permobj_segments = 0;
171
172 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
173 for (int i = 0; fields[i].valid(); i++) {
174 if (fields[i].klass == ik) {
175 return true;
176 }
177 }
178 return false;
179 }
180
181 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
182 return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
183 is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
184 }
185
// Hash an oop by its current address.
unsigned HeapShared::oop_hash(oop const& p) {
  // Do not call p->identity_hash() as that will update the
  // object header.
  return primitive_hash(cast_from_oop<intptr_t>(p));
}
240
241 bool HeapShared::has_been_archived(oop obj) {
242 assert(CDSConfig::is_dumping_heap(), "dump-time only");
243 return archived_object_cache()->get(obj) != nullptr;
244 }
245
246 int HeapShared::append_root(oop obj) {
247 assert(CDSConfig::is_dumping_heap(), "dump-time only");
248
249 // No GC should happen since we aren't scanning _pending_roots.
250 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
251
252 if (_pending_roots == nullptr) {
253 _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
254 }
255
256 return _pending_roots->append(obj);
257 }
258
// Returns the objArray that contains all the roots of the archived objects.
// Usable both at dump time (while writing) and at runtime (mapped archive).
objArrayOop HeapShared::roots() {
  if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!HeapShared::can_write()) {
      return nullptr;
    }
  } else {
    assert(UseSharedSpaces, "must be");
  }

  objArrayOop roots = (objArrayOop)_roots.resolve();
  assert(roots != nullptr, "should have been initialized");
  return roots;
}
273
274 static unsigned int oop_handle_hash(const OopHandle& oh) {
275 oop o = oh.resolve();
276 if (o == nullptr) {
277 return 0;
278 } else {
279 return o->identity_hash();
280 }
281 }
282
// Two OopHandles are equal iff they refer to the same object.
static bool oop_handle_equals(const OopHandle& a, const OopHandle& b) {
  return a.resolve() == b.resolve();
}
286
// Maps an original heap object to its scratch copy. Keyed by OopHandle
// (hashed via identity_hash, see oop_handle_hash above).
class OrigToScratchObjectTable: public ResourceHashtable<OopHandle, OopHandle,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;
295
// Record the orig_obj -> scratch_obj mapping (first mapping wins; see
// put_when_absent below). The table is created lazily.
void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
  if (_orig_to_scratch_object_table == nullptr) {
    _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
  }

  OopHandle orig_h(Universe::vm_global(), orig_obj);
  OopHandle scratch_h(Universe::vm_global(), scratch_obj);
  _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
}
306
307 oop HeapShared::orig_to_scratch_object(oop orig_obj) {
308 MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
309 if (_orig_to_scratch_object_table != nullptr) {
310 OopHandle orig(&orig_obj);
311 OopHandle* v = _orig_to_scratch_object_table->get(orig);
312 if (v != nullptr) {
313 return v->resolve();
314 }
315 }
316 return nullptr;
317 }
318
// Maps an archived object to its permanent index (see
// get_archived_object_permanent_index()).
class ArchivedObjectPermanentIndexTable: public ResourceHashtable<OopHandle, int,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

static ArchivedObjectPermanentIndexTable* _permanent_index_table = nullptr;
327
// Record that obj has the given permanent index. The table is created lazily.
void HeapShared::add_to_permanent_index_table(oop obj, int index) {
  assert_locked_or_safepoint(ArchivedObjectTables_lock);

  if (_permanent_index_table == nullptr) {
    _permanent_index_table = new (mtClass)ArchivedObjectPermanentIndexTable();
  }
  OopHandle oh(Universe::vm_global(), obj);
  _permanent_index_table->put(oh, index);
}
337
// Returns the permanent index of obj, or -1 if it has none. On the first
// runtime call, lazily builds _permanent_index_table from the permobj
// segments stored at the tail of the roots array.
int HeapShared::get_archived_object_permanent_index(oop obj) {
  if (!UsePermanentHeapObjects) {
    return -1;
  }
  if (!CDSConfig::is_dumping_heap() && _permobj_segments <= 0) {
    return -1;
  }

  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);

  if (!CDSConfig::is_dumping_heap() && _permanent_index_table == nullptr) {
    // Runtime, first call: index every object in every permobj segment.
    // A permanent index encodes (segment number << SHIFT) + offset.
    int first_permobj_segment = roots()->length() - _permobj_segments;
    for (int i = 0; i < _permobj_segments; i++) {
      objArrayOop a = (objArrayOop)roots()->obj_at(i + first_permobj_segment);
      for (int j = 0; j < a->length(); j++) {
        int index = (i << ArchiveHeapWriter::PERMOBJ_SEGMENT_MAX_SHIFT) + j;
        add_to_permanent_index_table(a->obj_at(j), index);
      }
    }
  }

  if (_permanent_index_table != nullptr) {
    if (_orig_to_scratch_object_table != nullptr) {
      // If obj has a scratch copy, look up the scratch copy instead.
      OopHandle orig(&obj);
      OopHandle* v = _orig_to_scratch_object_table->get(orig);
      if (v != nullptr) {
        obj = v->resolve();
      }
    }
    // Temporary on-stack handle, used only as a lookup key.
    OopHandle tmp(&obj);
    int* v = _permanent_index_table->get(tmp);
    if (v != nullptr) {
      int n = *v;
      return n;
    }
  }

  return -1;
}
377
// Returns the permanent object with the given index: from the mapped archive
// at runtime, or from the heap writer's pending set at dump time.
oop HeapShared::get_archived_object(int permanent_index) {
  if (ArchiveHeapLoader::is_in_use()) {
    assert(_permobj_segments > 0, "must be");

    // Permobj segments live at the tail of the roots array. The permanent
    // index splits into a segment number (upper bits) and an offset within
    // that segment (lower bits).
    int first_permobj_segment = roots()->length() - _permobj_segments;
    int upper = permanent_index >> ArchiveHeapWriter::PERMOBJ_SEGMENT_MAX_SHIFT;
    int lower = permanent_index & ArchiveHeapWriter::PERMOBJ_SEGMENT_MAX_MASK;
    objArrayOop a = (objArrayOop)roots()->obj_at(upper + first_permobj_segment);
    return a->obj_at(lower);
  } else {
    assert(CDSConfig::is_dumping_heap(), "must be");
    return ArchiveHeapWriter::get_perm_object_by_index(permanent_index);
  }
}
392
// Returns the archived root object at the given index; optionally clears the slot
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  assert(!CDSConfig::is_dumping_heap() && UseSharedSpaces, "runtime only");
  assert(!_roots.is_empty(), "must have loaded shared heap");
  oop result = roots()->obj_at(index);
  if (clear) {
    // Optionally drop the slot after reading (see clear_root()).
    clear_root(index);
  }
  return result;
}
404
405 void HeapShared::clear_root(int index) {
406 assert(index >= 0, "sanity");
407 assert(UseSharedSpaces, "must be");
408 if (ArchiveHeapLoader::is_in_use()) {
409 if (log_is_enabled(Debug, cds, heap)) {
410 oop old = roots()->obj_at(index);
411 log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
412 }
462 return nullptr;
463 }
464 }
  // Map ptr -> o via a newly-created global OopHandle. ptr must not
  // already have an entry.
  void set_oop(MetaspaceObj* ptr, oop o) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle handle(Universe::vm_global(), o);
    bool is_new = put(ptr, handle);
    assert(is_new, "cannot set twice");
  }
  // Remove ptr's entry, if any, releasing its global OopHandle.
  void remove_oop(MetaspaceObj* ptr) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle* handle = get(ptr);
    if (handle != nullptr) {
      handle->release(Universe::vm_global());
      remove(ptr);
    }
  }
};
480
481 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
482 if (_scratch_references_table == nullptr) {
483 _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
484 }
485 _scratch_references_table->set_oop(src, dest);
486 }
487
// Returns the scratch resolved_references array registered for src (null if
// none was added).
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_references_table->get_oop(src);
}
491
// Create the scratch mirrors for all primitive types and the scratch tables.
// CHECK propagates any exception from mirror creation to the caller.
void HeapShared::init_scratch_objects(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
      track_scratch_object(Universe::java_mirror(bt), m);
    }
  }
  _scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
  // May already have been created by add_scratch_resolved_references().
  if (_scratch_references_table == nullptr) {
    _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
  }
}
506
507 // Given java_mirror that represents a (primitive or reference) type T,
508 // return the "scratch" version that represents the same type T.
// Note that java_mirror will be returned if it's already a
510 // scratch mirror.
511 //
512 // See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {
  assert(java_lang_Class::is_instance(java_mirror), "must be");

  // If java_mirror is already one of the scratch basic-type mirrors, it is
  // its own scratch version.
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
        return java_mirror;
      }
    }
  }

  // Otherwise dispatch on whether it mirrors a primitive type or a Klass.
  if (java_lang_Class::is_primitive(java_mirror)) {
    return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
  } else {
    return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
  }
}
531
532 oop HeapShared::scratch_java_mirror(BasicType t) {
533 assert((uint)t < T_VOID+1, "range check");
534 assert(!is_reference_type(t), "sanity");
535 return _scratch_basic_type_mirrors[t].resolve();
536 }
537
// Returns the scratch mirror registered for k, if any.
oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_java_mirror_table->get_oop(k);
}
541
// Register mirror as the scratch copy of k's java mirror, and record the
// orig->scratch mapping for lookups by original object.
void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  track_scratch_object(k->java_mirror(), mirror);
  _scratch_java_mirror_table->set_oop(k, mirror);
}
546
// Drop the scratch objects associated with k (e.g. when k is excluded).
void HeapShared::remove_scratch_objects(Klass* k) {
  _scratch_java_mirror_table->remove_oop(k);
  if (k->is_instance_klass()) {
    // NOTE(review): this uses remove(), not remove_oop(), so the entry's
    // global OopHandle is not released here — confirm this is intentional.
    _scratch_references_table->remove(InstanceKlass::cast(k)->constants());
  }
}
553
554 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
555 return ik->is_hidden() &&
556 (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
557 ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
558 ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+"));
559
560 }
561
// A hidden class whose name contains "$$Lambda+" (after position 0) is a
// lambda proxy klass.
bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
  return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
}
565
566 bool HeapShared::is_archivable_hidden_klass(InstanceKlass* ik) {
567 return is_lambda_form_klass(ik) || is_lambda_proxy_klass(ik);
568 }
569
570 void HeapShared::copy_preinitialized_mirror(Klass* orig_k, oop orig_mirror, oop m) {
571 if (!orig_k->is_instance_klass()) {
572 return;
573 }
574 InstanceKlass* ik = InstanceKlass::cast(orig_k);
575
576 if (HeapShared::is_archivable_hidden_klass(ik)) {
577 // We can't rerun the <clinit> method of hidden classes as we don't save
578 // the classData, so we must archive its mirror in initialized state.
579 assert(ik->is_initialized(), "must be");
580 }
581
582 if (!ik->is_initialized() || !ClassPreinitializer::can_archive_preinitialized_mirror(ik)) {
583 return;
584 }
585
586 int nfields = 0;
587 for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
588 if (fs.access_flags().is_static()) {
589 fieldDescriptor& fd = fs.field_descriptor();
590 int offset = fd.offset();
591 switch (fd.field_type()) {
592 case T_OBJECT:
593 case T_ARRAY:
594 m->obj_field_put(offset, orig_mirror->obj_field(offset));
595 break;
596 case T_BOOLEAN:
597 m->bool_field_put(offset, orig_mirror->bool_field(offset));
598 break;
599 case T_BYTE:
600 m->byte_field_put(offset, orig_mirror->byte_field(offset));
601 break;
602 case T_SHORT:
603 m->short_field_put(offset, orig_mirror->short_field(offset));
604 break;
605 case T_CHAR:
606 m->char_field_put(offset, orig_mirror->char_field(offset));
607 break;
608 case T_INT:
609 m->int_field_put(offset, orig_mirror->int_field(offset));
610 break;
611 case T_LONG:
612 m->long_field_put(offset, orig_mirror->long_field(offset));
613 break;
614 case T_FLOAT:
615 m->float_field_put(offset, orig_mirror->float_field(offset));
616 break;
617 case T_DOUBLE:
618 m->double_field_put(offset, orig_mirror->double_field(offset));
619 break;
620 default:
621 ShouldNotReachHere();
622 }
623 nfields ++;
624 }
625 }
626 if (log_is_enabled(Info, cds, init)) {
627 ResourceMark rm;
628 log_debug(cds, init)("copied %3d field(s) in preinitialized mirror %s%s", nfields, ik->external_name(),
629 ik->is_hidden() ? " (hidden)" : "");
630 }
631
632 InstanceKlass* buffered_ik = ArchiveBuilder::current()->get_buffered_addr(ik);
633 buffered_ik->set_has_preinitialized_mirror();
634 }
635
// Install the original mirror's identity hash into the scratch mirror's
// mark word, so the archived mirror hashes identically at runtime.
static void copy_java_mirror_hashcode(oop orig_mirror, oop scratch_m) {
  int src_hash = orig_mirror->identity_hash();
  scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
  assert(scratch_m->mark().is_unlocked(), "sanity");

  DEBUG_ONLY(int archived_hash = scratch_m->identity_hash());
  assert(src_hash == archived_hash, "Java mirror wrong hash: original %x, scratch %x", src_hash, archived_hash);
}
644
645 void HeapShared::archive_java_mirrors() {
646 ClassPreinitializer::reset_preinit_check();
647
648 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
649 BasicType bt = (BasicType)i;
650 if (!is_reference_type(bt)) {
651 oop orig_mirror = Universe::java_mirror(bt);
652 oop m = _scratch_basic_type_mirrors[i].resolve();
653 assert(m != nullptr, "sanity");
654 copy_java_mirror_hashcode(orig_mirror, m);
655 bool success = archive_reachable_objects_from(1, _default_subgraph_info, orig_mirror);
656 assert(success, "sanity");
657
658 log_trace(cds, heap, mirror)(
659 "Archived %s mirror object from " PTR_FORMAT,
660 type2name(bt), p2i(m));
661
662 Universe::set_archived_basic_type_mirror_index(bt, append_root(m));
663 }
664 }
665
666 GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
667 assert(klasses != nullptr, "sanity");
668 for (int i = 0; i < klasses->length(); i++) {
669 Klass* orig_k = klasses->at(i);
670 oop orig_mirror = orig_k->java_mirror();
671 oop m = scratch_java_mirror(orig_k);
672 if (m != nullptr) {
673 copy_java_mirror_hashcode(orig_mirror, m);
674 copy_preinitialized_mirror(orig_k, orig_mirror, m);
675 if (ArchiveReflectionData && java_lang_Class::has_reflection_data(orig_mirror)) {
676 oop reflection_data = java_lang_Class::reflection_data(orig_mirror);
677 bool success = archive_reachable_objects_from(1, _default_subgraph_info, reflection_data);
678 guarantee(success, "");
679 java_lang_Class::set_reflection_data(m, reflection_data);
680 }
681 Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
682 bool success = archive_reachable_objects_from(1, _default_subgraph_info, orig_mirror);
683 guarantee(success, "scratch mirrors must point to only archivable objects");
684 buffered_k->set_archived_java_mirror(append_root(m));
685 ResourceMark rm;
686 log_trace(cds, heap, mirror)(
687 "Archived %s mirror object from " PTR_FORMAT,
688 buffered_k->external_name(), p2i(m));
689
      // archive the resolved_references array
691 if (buffered_k->is_instance_klass()) {
692 InstanceKlass* ik = InstanceKlass::cast(buffered_k);
693 oop rr = ik->constants()->prepare_resolved_references_for_archiving();
694 if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
695 bool success = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr);
696 assert(success, "must be");
697 int root_index = append_root(rr);
698 ik->constants()->cache()->set_archived_references(root_index);
699 }
700 }
701 }
702 }
705 void HeapShared::archive_strings() {
706 oop shared_strings_array = StringTable::init_shared_table(_dumped_interned_strings);
707 bool success = archive_reachable_objects_from(1, _default_subgraph_info, shared_strings_array);
708 // We must succeed because:
709 // - _dumped_interned_strings do not contain any large strings.
710 // - StringTable::init_shared_table() doesn't create any large arrays.
711 assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
712 StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
713 }
714
715 int HeapShared::archive_exception_instance(oop exception) {
716 bool success = archive_reachable_objects_from(1, _default_subgraph_info, exception);
717 assert(success, "sanity");
718 return append_root(exception);
719 }
720
721 void HeapShared::mark_native_pointers(oop orig_obj) {
722 if (java_lang_Class::is_instance(orig_obj)) {
723 ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
724 ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
725 } else if (java_lang_invoke_ResolvedMethodName::is_instance(orig_obj)) {
726 ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_invoke_ResolvedMethodName::vmtarget_offset());
727 }
728 }
729
730 void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
731 CachedOopInfo* info = archived_object_cache()->get(src_obj);
732 assert(info != nullptr, "must be");
733 has_oop_pointers = info->has_oop_pointers();
734 has_native_pointers = info->has_native_pointers();
735 }
736
737 void HeapShared::set_has_native_pointers(oop src_obj) {
738 CachedOopInfo* info = archived_object_cache()->get(src_obj);
739 assert(info != nullptr, "must be");
740 info->set_has_native_pointers();
741 }
742
// Dump-time entry point: copy all archivable heap objects and hand the
// result (together with the collected roots) to ArchiveHeapWriter.
void HeapShared::archive_objects(ArchiveHeapInfo *heap_info) {
  {
    // No safepoint may occur while we hold raw oops in the caches below.
    NoSafepointVerifier nsv;

    // The "default subgraph" (rooted at Object) collects special objects
    // (mirrors, strings, exceptions) that belong to no class-specific subgraph.
    _default_subgraph_info = init_subgraph_info(vmClasses::Object_klass(), false);
    // Diagnostic state dumped by exit_on_error() if archiving fails.
    _trace = new GrowableArrayCHeap<oop, mtClassShared>(250);
    _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    // Without compressed oops, report the G1 reserved range instead of the
    // narrow-oop encoding range.
    log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                  UseCompressedOops ? p2i(CompressedOops::begin()) :
                                      p2i((address)G1CollectedHeap::heap()->reserved().start()),
                  UseCompressedOops ? p2i(CompressedOops::end()) :
                                      p2i((address)G1CollectedHeap::heap()->reserved().end()));
    copy_objects();

    CDSHeapVerifier::verify();
    check_default_subgraph_classes();
  }

  ArchiveHeapWriter::write(_pending_roots, heap_info);
}
767
768 void HeapShared::copy_interned_strings() {
769 init_seen_objects_table();
// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  // Record the buffered (archived) copy of the klass, not the dump-time original.
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);

  if (_subgraph_object_klasses == nullptr) {
    _subgraph_object_klasses =
      new (mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_k), "must be a shared class");

  if (_k == buffered_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (buffered_k->is_instance_klass()) {
    if (!ArchiveInvokeDynamic) {
      // FIXME: this supports Lambda Proxy classes
      assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class(),
             "must be boot class");
    }
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
    check_allowed_klass(InstanceKlass::cast(orig_k));
  } else if (buffered_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(buffered_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
      // The element class must also be allowed in the archived heap.
      check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
    }
    if (buffered_k == Universe::objectArrayKlass()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(buffered_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(buffered_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(buffered_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}
910
// Check that ik may appear in an archived object sub-graph. Unless
// ArchiveInvokeDynamic is on, only classes in java.base (or, in non-product
// builds, classes loaded for ArchiveHeapTestClass) are allowed; anything
// else is a fatal dump-time error.
void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
  if (ArchiveInvokeDynamic) {
    // FIXME -- this allows LambdaProxy classes
    return;
  }
  if (ik->module()->name() == vmSymbols::java_base()) {
    assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
    return;
  }

#ifndef PRODUCT
  if (!ik->module()->is_named() && ik->package() == nullptr) {
    // This class is loaded by ArchiveHeapTestClass
    return;
  }
  const char* extra_msg = ", or in an unnamed package of an unnamed module";
#else
  const char* extra_msg = "";
#endif

  ResourceMark rm;
  log_error(cds, heap)("Class %s not allowed in archive heap. Must be in java.base%s",
                       ik->external_name(), extra_msg);
  MetaspaceShared::unrecoverable_writing_error();
}
993 _subgraph_object_klasses =
994 ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
995 for (int i = 0; i < num_subgraphs_klasses; i++) {
996 Klass* subgraph_k = subgraph_object_klasses->at(i);
997 if (log_is_enabled(Info, cds, heap)) {
998 ResourceMark rm;
999 log_info(cds, heap)(
1000 "Archived object klass %s (%2d) => %s",
1001 _k->external_name(), i, subgraph_k->external_name());
1002 }
1003 _subgraph_object_klasses->at_put(i, subgraph_k);
1004 ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
1005 }
1006 }
1007
1008 ArchivePtrMarker::mark_pointer(&_k);
1009 ArchivePtrMarker::mark_pointer(&_entry_field_records);
1010 ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
1011 }
1012
// Closure that converts each dump-time KlassSubGraphInfo into an archived
// record in the RO region and registers it in the compact hashtable, keyed
// by the hash of the buffered Klass*.
class HeapShared::CopyKlassSubGraphInfoToArchive : StackObj {
 public:
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    // Only write a record for subgraphs that actually have content.
    if (info.subgraph_object_klasses() != nullptr || info.subgraph_entry_fields() != nullptr) {
      ArchivedKlassSubGraphInfoRecord* record = HeapShared::archive_subgraph_info(&info);
      Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
      // Store the record's position as an offset relative to the archive base.
      u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};
1029
1030 ArchivedKlassSubGraphInfoRecord* HeapShared::archive_subgraph_info(KlassSubGraphInfo* info) {
1031 ArchivedKlassSubGraphInfoRecord* record =
1032 (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
1033 record->init(info);
1034 return record;
1035 }
1036
1037 // Build the records of archived subgraph infos, which include:
1038 // - Entry points to all subgraphs from the containing class mirror. The entry
1039 // points are static fields in the mirror. For each entry point, the field
1040 // offset, and value are recorded in the sub-graph
1041 // info. The value is stored back to the corresponding field at runtime.
1042 // - A list of klasses that need to be loaded/initialized before archived
1043 // java object sub-graph can be accessed at runtime.
1044 void HeapShared::write_subgraph_info_table() {
1045 // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
1046 DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
1047 CompactHashtableStats stats;
1048
1049 _run_time_subgraph_info_table.reset();
1050
1051 CompactHashtableWriter writer(d_table->_count, &stats);
1052 CopyKlassSubGraphInfoToArchive copy(&writer);
1053 d_table->iterate(©);
1054 writer.dump(&_run_time_subgraph_info_table, "subgraphs");
1055
1056 _runtime_default_subgraph_info = archive_subgraph_info(_default_subgraph_info);
1057
1058 #ifndef PRODUCT
1059 if (ArchiveHeapTestClass != nullptr) {
1060 size_t len = strlen(ArchiveHeapTestClass) + 1;
1061 Array<char>* array = ArchiveBuilder::new_ro_array<char>((int)len);
1062 strncpy(array->adr_at(0), ArchiveHeapTestClass, len);
1063 _archived_ArchiveHeapTestClass = array;
1064 }
1065 #endif
1066 if (log_is_enabled(Info, cds, heap)) {
1067 print_stats();
1068 }
1069 }
1070
// Serialize (write) or deserialize (read) miscellaneous heap-archiving state
// that is not part of the subgraph tables.
void HeapShared::serialize_misc_info(SerializeClosure* soc) {
  soc->do_int(&_permobj_segments);
}
1074
1075 void HeapShared::init_roots(oop roots_oop) {
1076 if (roots_oop != nullptr) {
1077 assert(ArchiveHeapLoader::is_in_use(), "must be");
1078 _roots = OopHandle(Universe::vm_global(), roots_oop);
1079 }
1080 }
1081
// Serialize (dump time) or restore (runtime) the subgraph info table and the
// default subgraph record. The read order must match the write order exactly.
void HeapShared::serialize_tables(SerializeClosure* soc) {

#ifndef PRODUCT
  soc->do_ptr(&_archived_ArchiveHeapTestClass);
  if (soc->reading() && _archived_ArchiveHeapTestClass != nullptr) {
    // Re-enable test-class support when reading an archive that was produced
    // with -XX:ArchiveHeapTestClass.
    _test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
    setup_test_class(_test_class_name);
  }
#endif

  _run_time_subgraph_info_table.serialize_header(soc);
  soc->do_ptr(&_runtime_default_subgraph_info);

}
1096
// With -XX:VerifyArchivedFields >= 1, run a full heap verification around the
// restoration of k's archived subgraph ("which" is e.g. "before" or "after").
// With -XX:VerifyArchivedFields > 1 (once init has completed), additionally
// force a GC to stress GC-safety of the freshly materialized objects.
static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 force a GC to happen in such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      // FlagSetting restores the previous flag values when it goes out of scope.
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC, true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}
1123
// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(UseSharedSpaces, "runtime only!");
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }

  if (!CDSConfig::has_preloaded_classes()) {
    // Resolve the classes of the objects in the default subgraph first.
    assert( _runtime_default_subgraph_info != nullptr, "must be");
    Array<Klass*>* klasses = _runtime_default_subgraph_info->subgraph_object_klasses();
    if (klasses != nullptr) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        ExceptionMark em(current); // no exception can happen here
        resolve_or_init(k, /*do_init*/false, current);
      }
    }
  }

  // Then resolve classes for the static-field subgraphs (with and without
  // the full module graph).
  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
1151
1152 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1153 for (int i = 0; fields[i].valid(); i++) {
1154 ArchivableStaticFieldInfo* info = &fields[i];
1155 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1156 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1157 assert(k != nullptr && k->is_shared_boot_class(), "sanity");
1158 resolve_classes_for_subgraph_of(current, k);
1159 }
1160 }
1161
1162 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1163 JavaThread* THREAD = current;
1164 ExceptionMark em(THREAD);
1165 const ArchivedKlassSubGraphInfoRecord* record =
1166 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1167 if (HAS_PENDING_EXCEPTION) {
1168 CLEAR_PENDING_EXCEPTION;
1169 }
1170 if (record == nullptr) {
1171 clear_archived_roots_of(k);
1172 }
1173 }
1174
1175 void HeapShared::initialize_java_lang_invoke(TRAPS) {
1176 if (CDSConfig::is_loading_invokedynamic() || ArchiveInvokeDynamic) {
1177 resolve_or_init("java/lang/invoke/Invokers$Holder", true, CHECK);
1178 resolve_or_init("java/lang/invoke/MethodHandle", true, CHECK);
1179 resolve_or_init("java/lang/invoke/MethodHandleNatives", true, CHECK);
1180 resolve_or_init("java/lang/invoke/DirectMethodHandle$Holder", true, CHECK);
1181 resolve_or_init("java/lang/invoke/DelegatingMethodHandle$Holder", true, CHECK);
1182 resolve_or_init("java/lang/invoke/LambdaForm$Holder", true, CHECK);
1183 resolve_or_init("java/lang/invoke/BoundMethodHandle$Species_L", true, CHECK);
1184 }
1185 }
1186
// For every class in the default subgraph that is defined by the given
// loader: first link them all (pass 0), then run static initialization
// (pass 1). Classes that are not loaded yet are skipped here and handled
// in a later phase.
void HeapShared::initialize_default_subgraph_classes(Handle loader, TRAPS) {
  if (!ArchiveHeapLoader::is_in_use()) {
    return;
  }

  assert( _runtime_default_subgraph_info != nullptr, "must be");
  Array<Klass*>* klasses = _runtime_default_subgraph_info->subgraph_object_klasses();
  if (klasses != nullptr) {
    for (int pass = 0; pass < 2; pass ++) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        if (k->class_loader_data() == nullptr) {
          // This class is not yet loaded. We will initialize it in a later phase.
          continue;
        }
        if (k->class_loader() == loader()) {
          if (pass == 0) {
            // Link everything first so pass 1 initializers can reference
            // each other safely.
            if (k->is_instance_klass()) {
              InstanceKlass::cast(k)->link_class(CHECK);
            }
          } else {
            resolve_or_init(k, /*do_init*/true, CHECK);
          }
        }
      }
    }
  }
}
1215
1216 void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
1217 JavaThread* THREAD = current;
1218 if (!ArchiveHeapLoader::is_in_use()) {
1219 return; // nothing to do
1220 }
1221
1222 ExceptionMark em(THREAD);
1223 const ArchivedKlassSubGraphInfoRecord* record =
1224 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
1225
1226 if (HAS_PENDING_EXCEPTION) {
1227 CLEAR_PENDING_EXCEPTION;
    // None of the field values will be set if there was an exception when initializing the classes.
1229 // The java code will not see any of the archived objects in the
1230 // subgraphs referenced from k in this case.
1231 return;
1232 }
1233
1234 if (record != nullptr) {
1235 init_archived_fields_for(k, record);
1269 log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
1270 k->external_name());
1271 }
1272 return nullptr;
1273 }
1274
1275 if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
1276 if (log_is_enabled(Info, cds, heap)) {
1277 ResourceMark rm(THREAD);
1278 log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
1279 k->external_name());
1280 }
1281 return nullptr;
1282 }
1283
1284 if (log_is_enabled(Info, cds, heap)) {
1285 ResourceMark rm;
1286 log_info(cds, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
1287 }
1288
1289 if (do_init && k->name() == vmSymbols::java_lang_invoke_MethodType()) {
1290 // FIXME - hack.
1291 //
1292 // (The real fix would be to archive the MethodType class in its already initialized state. That
1293 // way we don't need to re-execute the <clinit> methods)
1294 //
1295 // We need to do this to break a cycle in the way the archived subgraphs are restored. Without this block, we
1296 // have the following sequence
1297 //
1298 // MethodType.<clinit>()
1299 // -> CDS.initializeFromArchive(MethodType.class);
1300 // -> (this "if" block)
1301 // -> resolve_or_init("MethodType", ...); // this does nothing because MethodType.<clinit> is already executing
1302 // -> resolve_or_init("DirectMethodHandle", ...); // this class is in record->subgraph_object_klasses();
1303 // -> DirectMethodHandle.<clinit>()
1304 // -> MethodType.methodType()
1305 // -> MethodType.genericMethodType()
1306 // -> aaload MethodType.objectOnlyTypes[n]; <<<< here
1307 //
1308 // We need to restore MethodType.objectOnlyTypes here, or else the above aaload will
1309 // get an NPE.
1310 Array<int>* entry_field_records = record->entry_field_records();
1311 assert(entry_field_records != nullptr, "must be");
1312 int efr_len = entry_field_records->length();
1313 assert(efr_len == 2, "must be");
1314 int root_index = entry_field_records->at(1);
1315 oop obj = get_root(root_index, /*clear=*/false);
1316 if (obj != nullptr) {
1317 objArrayOop archivedObjects = objArrayOop(obj);
1318 InstanceKlass* ik = InstanceKlass::cast(k);
1319 oop m = ik->java_mirror();
1320
1321 {
1322 fieldDescriptor fd;
1323 TempNewSymbol name = SymbolTable::new_symbol("archivedMethodTypes");
1324 TempNewSymbol sig = SymbolTable::new_symbol("Ljava/util/HashMap;");
1325 Klass* result = ik->find_field(name, sig, true, &fd);
1326 assert(result != nullptr, "must be");
1327 m->obj_field_put(fd.offset(), archivedObjects->obj_at(0));
1328 }
1329
1330 {
1331 fieldDescriptor fd;
1332 TempNewSymbol name = SymbolTable::new_symbol("objectOnlyTypes");
1333 TempNewSymbol sig = SymbolTable::new_symbol("[Ljava/lang/invoke/MethodType;");
1334 Klass* result = ik->find_field(name, sig, true, &fd);
1335 assert(result != nullptr, "must be");
1336 m->obj_field_put(fd.offset(), archivedObjects->obj_at(1));
1337 }
1338 }
1339 }
1340
1341 resolve_or_init(k, do_init, CHECK_NULL);
1342
1343 // Load/link/initialize the klasses of the objects in the subgraph.
1344 // nullptr class loader is used.
1345 Array<Klass*>* klasses = record->subgraph_object_klasses();
1346 if (klasses != nullptr) {
1347 for (int i = 0; i < klasses->length(); i++) {
1348 Klass* klass = klasses->at(i);
1349 if (!klass->is_shared()) {
1350 return nullptr;
1351 }
1352 resolve_or_init(klass, do_init, CHECK_NULL);
1353 }
1354 }
1355 }
1356
1357 return record;
1358 }
1359
1360 void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
1361 TempNewSymbol klass_name_sym = SymbolTable::new_symbol(klass_name);
1362 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
1363 if (k == nullptr) {
1364 return;
1365 }
1366 assert(k->is_shared_boot_class(), "sanity");
1367 resolve_or_init(k, false, CHECK);
1368 if (do_init) {
1369 resolve_or_init(k, true, CHECK);
1370 }
1371 }
1372
1373 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
1374 if (!do_init) {
1375 if (k->class_loader_data() == nullptr) {
1376 Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
1377 assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
1378 }
1379 } else {
1380 assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
1381 if (k->is_instance_klass()) {
1382 InstanceKlass* ik = InstanceKlass::cast(k);
1383 ik->initialize(CHECK);
1384 } else if (k->is_objArray_klass()) {
1385 ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1386 oak->initialize(CHECK);
1387 }
1388 }
1389 }
1390
1391 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1392 verify_the_heap(k, "before");
1422 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
1423 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
1424 if (record != nullptr) {
1425 Array<int>* entry_field_records = record->entry_field_records();
1426 if (entry_field_records != nullptr) {
1427 int efr_len = entry_field_records->length();
1428 assert(efr_len % 2 == 0, "sanity");
1429 for (int i = 0; i < efr_len; i += 2) {
1430 int root_index = entry_field_records->at(i+1);
1431 clear_root(root_index);
1432 }
1433 }
1434 }
1435 }
1436
1437 class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
1438 int _level;
1439 bool _record_klasses_only;
1440 KlassSubGraphInfo* _subgraph_info;
1441 oop _referencing_obj;
1442 // The following are for maintaining a stack for determining
1443 // CachedOopInfo::_referrer
1444 static WalkOopAndArchiveClosure* _current;
1445 WalkOopAndArchiveClosure* _last;
1446 public:
1447 WalkOopAndArchiveClosure(int level,
1448 bool record_klasses_only,
1449 KlassSubGraphInfo* subgraph_info,
1450 oop orig) :
1451 _level(level),
1452 _record_klasses_only(record_klasses_only),
1453 _subgraph_info(subgraph_info),
1454 _referencing_obj(orig) {
1455 _last = _current;
1456 _current = this;
1457 }
1458 ~WalkOopAndArchiveClosure() {
1459 _current = _last;
1460 }
1461 void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
1462 void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
1463
1464 protected:
1465 template <class T> void do_oop_work(T *p) {
1466 oop obj = RawAccess<>::oop_load(p);
1467 if (!CompressedOops::is_null(obj)) {
1468 size_t field_delta = pointer_delta(p, _referencing_obj, sizeof(char));
1469 if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
1470 ResourceMark rm;
1471 log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level,
1472 _referencing_obj->klass()->external_name(), field_delta,
1473 p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
1474 if (log_is_enabled(Trace, cds, heap)) {
1475 LogTarget(Trace, cds, heap) log;
1476 LogStream out(log);
1477 obj->print_on(&out);
1478 }
1479 }
1480
1481 bool success = HeapShared::archive_reachable_objects_from(
1482 _level + 1, _subgraph_info, obj);
1483 assert(success, "VM should have exited with unarchivable objects for _level > 1");
1484 }
1485 }
1486
1487 public:
1488 static WalkOopAndArchiveClosure* current() { return _current; }
1498
1499 template <class T> void check(T *p) {
1500 _result |= (HeapAccess<>::oop_load(p) != nullptr);
1501 }
1502
1503 public:
1504 PointsToOopsChecker() : _result(false) {}
1505 void do_oop(narrowOop *p) { check(p); }
1506 void do_oop( oop *p) { check(p); }
1507 bool result() { return _result; }
1508 };
1509
1510 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj) {
1511 WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
1512 oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
1513 PointsToOopsChecker points_to_oops_checker;
1514 obj->oop_iterate(&points_to_oops_checker);
1515 return CachedOopInfo(referrer, points_to_oops_checker.result());
1516 }
1517
1518 // We currently allow only the box classes, which are initialized very early by
1519 // HeapShared::init_box_classes().
1520 bool HeapShared::can_mirror_be_used_in_subgraph(oop orig_java_mirror) {
1521 return java_lang_Class::is_primitive(orig_java_mirror)
1522 || orig_java_mirror == vmClasses::Boolean_klass()->java_mirror()
1523 || orig_java_mirror == vmClasses::Character_klass()->java_mirror()
1524 || orig_java_mirror == vmClasses::Float_klass()->java_mirror()
1525 || orig_java_mirror == vmClasses::Double_klass()->java_mirror()
1526 || orig_java_mirror == vmClasses::Byte_klass()->java_mirror()
1527 || orig_java_mirror == vmClasses::Short_klass()->java_mirror()
1528 || orig_java_mirror == vmClasses::Integer_klass()->java_mirror()
1529 || orig_java_mirror == vmClasses::Long_klass()->java_mirror()
1530 || orig_java_mirror == vmClasses::Void_klass()->java_mirror()
1531 || orig_java_mirror == vmClasses::Object_klass()->java_mirror();
1532 }
1533
1534 void HeapShared::init_box_classes(TRAPS) {
1535 if (ArchiveHeapLoader::is_in_use()) {
1536 vmClasses::Boolean_klass()->initialize(CHECK);
1537 vmClasses::Character_klass()->initialize(CHECK);
1538 vmClasses::Float_klass()->initialize(CHECK);
1539 vmClasses::Double_klass()->initialize(CHECK);
1540 vmClasses::Byte_klass()->initialize(CHECK);
1541 vmClasses::Short_klass()->initialize(CHECK);
1542 vmClasses::Integer_klass()->initialize(CHECK);
1543 vmClasses::Long_klass()->initialize(CHECK);
1544 vmClasses::Void_klass()->initialize(CHECK);
1545 }
1546 }
1547
1548 void HeapShared::exit_on_error() {
1549 if (_context != nullptr) {
1550 ResourceMark rm;
1551 LogStream ls(Log(cds, heap)::error());
1552 ls.print_cr("Context");
1553 for (int i = 0; i < _context->length(); i++) {
1554 const char* s = _context->at(i);
1555 ls.print_cr("- %s", s);
1556 }
1557 }
1558 if (_trace != nullptr) {
1559 ResourceMark rm;
1560 LogStream ls(Log(cds, heap)::error());
1561 ls.print_cr("Reference trace");
1562 for (int i = 0; i < _trace->length(); i++) {
1563 oop orig_obj = _trace->at(i);
1564 ls.print_cr("[%d] ========================================", i);
1565 orig_obj->print_on(&ls);
1566 ls.cr();
1567 }
1568 }
1569 MetaspaceShared::unrecoverable_writing_error();
1570 }
1571
// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
// trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all orig_obj and all reachable objects.
bool HeapShared::archive_reachable_objects_from(int level,
                                                KlassSubGraphInfo* subgraph_info,
                                                oop orig_obj) {
  // NOTE(review): ArchivingObjectMark presumably records orig_obj for the
  // diagnostic trace printed by exit_on_error() -- confirm in its definition.
  ArchivingObjectMark mark(orig_obj);
  assert(orig_obj != nullptr, "must be");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    exit_on_error();
  }

#if 0
  if (java_lang_Class::is_instance(orig_obj) && subgraph_info != _default_subgraph_info) {
    if (can_mirror_be_used_in_subgraph(orig_obj)) {
      orig_obj = scratch_java_mirror(orig_obj);
      assert(orig_obj != nullptr, "must be archived");
    } else {
      // Don't follow the fields -- they will be nulled out when the mirror was copied

      // FIXME - we should preserve the static fields of LambdaForm classes (and other hidden classes?)
      // so we need to walk the oop fields.
      orig_obj = scratch_java_mirror(orig_obj);
      assert(orig_obj != nullptr, "must be archived");
    }
  }
#else
  // Mirrors are archived via their scratch copies, so switch to the scratch
  // mirror before recording/walking.
  if (java_lang_Class::is_instance(orig_obj)) {
    orig_obj = scratch_java_mirror(orig_obj);
    assert(orig_obj != nullptr, "must be archived");
  }
#endif

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return true;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool already_archived = has_been_archived(orig_obj);
  bool record_klasses_only = already_archived;
  if (!already_archived) {
    ++_num_new_archived_objs;
    if (!archive_object(orig_obj)) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return false;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        exit_on_error();
      }
    }
  }

  // Record the klass so it can be loaded/initialized before the subgraph is
  // restored at runtime.
  Klass *orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  // Recursively archive everything directly reachable from orig_obj.
  WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj);
  orig_obj->oop_iterate(&walker);

  if (CDSEnumKlass::is_enum_obj(orig_obj)) {
    CDSEnumKlass::handle_enum_obj(level + 1, subgraph_info, orig_obj);
  }
  return true;
}
1654
1655 //
1656 // Start from the given static field in a java mirror and archive the
1657 // complete sub-graph of java heap objects that are reached directly
1658 // or indirectly from the starting object by following references.
1659 // Sub-graph archiving restrictions (current):
1660 //
1661 // - All classes of objects in the archived sub-graph (including the
1662 // entry class) must be boot class only.
1663 // - No java.lang.Class instance (java mirror) can be included inside
1664 // an archived sub-graph. Mirror can only be the sub-graph entry object.
1665 //
1666 // The Java heap object sub-graph archiving process (see
1667 // WalkOopAndArchiveClosure):
1668 //
1669 // 1) Java object sub-graph archiving starts from a given static field
1670 // within a Class instance (java mirror). If the static field is a
1671 // reference field and points to a non-null java object, proceed to
1749 if (!CompressedOops::is_null(f)) {
1750 verify_subgraph_from(f);
1751 }
1752 }
1753
1754 void HeapShared::verify_subgraph_from(oop orig_obj) {
1755 if (!has_been_archived(orig_obj)) {
1756 // It's OK for the root of a subgraph to be not archived. See comments in
1757 // archive_reachable_objects_from().
1758 return;
1759 }
1760
1761 // Verify that all objects reachable from orig_obj are archived.
1762 init_seen_objects_table();
1763 verify_reachable_objects_from(orig_obj);
1764 delete_seen_objects_table();
1765 }
1766
1767 void HeapShared::verify_reachable_objects_from(oop obj) {
1768 _num_total_verifications ++;
1769 if (java_lang_Class::is_instance(obj)) {
1770 obj = scratch_java_mirror(obj);
1771 assert(obj != nullptr, "must be");
1772 }
1773 if (!has_been_seen_during_subgraph_recording(obj)) {
1774 set_has_been_seen_during_subgraph_recording(obj);
1775 assert(has_been_archived(obj), "must be");
1776 VerifySharedOopClosure walker;
1777 obj->oop_iterate(&walker);
1778 }
1779 }
1780 #endif
1781
// The "default subgraph" contains special objects (see heapShared.hpp) that
// can be accessed before we load any Java classes (including java/lang/Class).
// Make sure that these are only instances of the very few specific types
// that we can handle.
void HeapShared::check_default_subgraph_classes() {
  GrowableArray<Klass*>* klasses = _default_subgraph_info->subgraph_object_klasses();
  int num = klasses->length();
  int warned = 0;
  for (int i = 0; i < num; i++) {
    Klass* subgraph_k = klasses->at(i);
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)(
          "Archived object klass (default subgraph %d) => %s",
          i, subgraph_k->external_name());
    }

    if (subgraph_k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(subgraph_k);
      // Compare against the dump-time (source) name symbol, since subgraph_k
      // may be a buffered copy.
      Symbol* name = ArchiveBuilder::current()->get_source_addr(ik->name());
      if (name != vmSymbols::java_lang_Class() &&
          name != vmSymbols::java_lang_String() &&
          name != vmSymbols::java_lang_ArithmeticException() &&
          name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
          name != vmSymbols::java_lang_ArrayStoreException() &&
          name != vmSymbols::java_lang_ClassCastException() &&
          name != vmSymbols::java_lang_InternalError() &&
          name != vmSymbols::java_lang_NullPointerException() &&
          !is_archivable_hidden_klass(ik)) {
        // Unexpected type in the default subgraph -- log it (non-fatal).
        ResourceMark rm;
        const char* category = ArchiveUtils::class_category(ik);
        log_info(cds)("TODO: Archived unusual klass (default subgraph %2d) => %-5s %s",
                      ++warned, category, ik->external_name());
      }
    }
  }
}
1819
// Table of objects visited during the current subgraph recording/verification
// pass; allocated by init_seen_objects_table() and freed by
// delete_seen_objects_table().
HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;

// Per-subgraph counters — presumably reset for each
// {start_recording_subgraph ... done_recording_subgraph} pass; the
// resetting code is outside this chunk (TODO confirm).
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

// Cumulative statistics across all subgraphs; reset at the start of
// archive_object_subgraphs() and reported in its log output.
int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;
1830
1831 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
1832 return _seen_objects_table->get(obj) != nullptr;
1833 }
1834
1835 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
1836 assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
1882 }
1883 }
1884 }
1885 bool found() { return _found; }
1886 int offset() { return _offset; }
1887 };
1888
1889 void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
1890 TRAPS) {
1891 for (int i = 0; fields[i].valid(); i++) {
1892 ArchivableStaticFieldInfo* info = &fields[i];
1893 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1894 TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);
1895 ResourceMark rm; // for stringStream::as_string() etc.
1896
1897 #ifndef PRODUCT
1898 bool is_test_class = (ArchiveHeapTestClass != nullptr) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
1899 const char* test_class_name = ArchiveHeapTestClass;
1900 #else
1901 bool is_test_class = false;
1902 const char* test_class_name = "";
1903 #endif
1904
1905 if (is_test_class) {
1906 log_warning(cds)("Loading ArchiveHeapTestClass %s ...", test_class_name);
1907 }
1908
1909 Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, THREAD);
1910 if (HAS_PENDING_EXCEPTION) {
1911 CLEAR_PENDING_EXCEPTION;
1912 stringStream st;
1913 st.print("Fail to initialize archive heap: %s cannot be loaded by the boot loader", info->klass_name);
1914 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1915 }
1916
1917 if (!k->is_instance_klass()) {
1918 stringStream st;
1919 st.print("Fail to initialize archive heap: %s is not an instance class", info->klass_name);
1920 THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
1921 }
1922
2049 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2050 bool is_full_module_graph) {
2051 _num_total_subgraph_recordings = 0;
2052 _num_total_walked_objs = 0;
2053 _num_total_archived_objs = 0;
2054 _num_total_recorded_klasses = 0;
2055 _num_total_verifications = 0;
2056
2057 // For each class X that has one or more archived fields:
2058 // [1] Dump the subgraph of each archived field
2059 // [2] Create a list of all the class of the objects that can be reached
2060 // by any of these static fields.
2061 // At runtime, these classes are initialized before X's archived fields
2062 // are restored by HeapShared::initialize_from_archived_subgraph().
2063 int i;
2064 for (int i = 0; fields[i].valid(); ) {
2065 ArchivableStaticFieldInfo* info = &fields[i];
2066 const char* klass_name = info->klass_name;
2067 start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2068
2069 ContextMark cm(klass_name);
2070 // If you have specified consecutive fields of the same klass in
2071 // fields[], these will be archived in the same
2072 // {start_recording_subgraph ... done_recording_subgraph} pass to
2073 // save time.
2074 for (; fields[i].valid(); i++) {
2075 ArchivableStaticFieldInfo* f = &fields[i];
2076 if (f->klass_name != klass_name) {
2077 break;
2078 }
2079
2080 ContextMark cm(f->field_name);
2081 archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2082 f->offset, f->field_name);
2083 }
2084 done_recording_subgraph(info->klass, klass_name);
2085 }
2086
2087 log_info(cds, heap)("Archived subgraph records = %d",
2088 _num_total_subgraph_recordings);
2089 log_info(cds, heap)(" Walked %d objects", _num_total_walked_objs);
2090 log_info(cds, heap)(" Archived %d objects", _num_total_archived_objs);
2091 log_info(cds, heap)(" Recorded %d klasses", _num_total_recorded_klasses);
2092
2093 #ifndef PRODUCT
2094 for (int i = 0; fields[i].valid(); i++) {
2095 ArchivableStaticFieldInfo* f = &fields[i];
2096 verify_subgraph_from_static_field(f->klass, f->offset);
2097 }
2098 log_info(cds, heap)(" Verified %d references", _num_total_verifications);
2099 #endif
2100 }
|