6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/archiveBuilder.hpp"
27 #include "cds/archiveHeapLoader.hpp"
28 #include "cds/archiveHeapWriter.hpp"
29 #include "cds/archiveUtils.hpp"
30 #include "cds/cdsConfig.hpp"
31 #include "cds/cdsEnumKlass.hpp"
32 #include "cds/cdsHeapVerifier.hpp"
33 #include "cds/heapShared.hpp"
34 #include "cds/metaspaceShared.hpp"
35 #include "classfile/classLoaderData.hpp"
36 #include "classfile/classLoaderExt.hpp"
37 #include "classfile/javaClasses.inline.hpp"
38 #include "classfile/modules.hpp"
39 #include "classfile/stringTable.hpp"
40 #include "classfile/symbolTable.hpp"
41 #include "classfile/systemDictionary.hpp"
42 #include "classfile/systemDictionaryShared.hpp"
43 #include "classfile/vmClasses.hpp"
44 #include "classfile/vmSymbols.hpp"
45 #include "gc/shared/collectedHeap.hpp"
46 #include "gc/shared/gcLocker.hpp"
47 #include "gc/shared/gcVMOperations.hpp"
48 #include "logging/log.hpp"
49 #include "logging/logStream.hpp"
68 #include "gc/g1/g1CollectedHeap.hpp"
69 #endif
70
71 #if INCLUDE_CDS_JAVA_HEAP
72
// Describes one static field that serves as an entry point ("root") of an
// archived object subgraph. Only klass_name/field_name are given statically
// (see the tables below); klass, offset and type start out unresolved
// (nullptr / 0 / T_ILLEGAL) and are presumably filled in during dump-time
// setup — that code is not in this chunk.
struct ArchivableStaticFieldInfo {
  const char* klass_name;   // slash-separated name of the declaring class
  const char* field_name;   // name of the static field in that class
  InstanceKlass* klass;     // resolved klass; nullptr until resolved
  int offset;               // field offset; 0 until resolved
  BasicType type;           // field type; T_ILLEGAL until resolved

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  // A {nullptr, nullptr} entry marks the end of a table.
  bool valid() {
    return klass_name != nullptr;
  }
};
87
bool HeapShared::_disable_writing = false;
// Interned strings collected at dump time; consumed by archive_strings()
// and copy_interned_strings().
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

// Dump-time allocation statistics, updated via count_allocation() in
// archive_object().
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
// Support for -XX:ArchiveHeapTestClass (non-product builds only); see
// serialize_tables() and setup_test_class().
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static const Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif
103
104
105 //
106 // If you add new entries to the following tables, you should know what you're doing!
107 //
108
// Static fields whose referenced object subgraphs are archived in the
// CDS heap. Each entry names the declaring class and the field.
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/Long$LongCache",                 "archivedCache"},
  {"java/lang/Byte$ByteCache",                 "archivedCache"},
  {"java/lang/Short$ShortCache",               "archivedCache"},
  {"java/lang/Character$CharacterCache",       "archivedCache"},
  {"java/util/jar/Attributes$Name",            "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",               "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleGraph"},
  {"java/util/ImmutableCollections",           "archivedObjects"},
  {"java/lang/ModuleLayer",                    "EMPTY_LAYER"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",           "archivedCaches"},
#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr}, // terminator (see ArchivableStaticFieldInfo::valid())
};
127
// full module graph
// Additional subgraph entry fields used for the archived full module graph.
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders",  "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS,                   ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData",             "archivedData"},
  {nullptr, nullptr}, // terminator
};
135
// Subgraph info for special objects not belonging to any particular class
// (mirrors, interned strings, etc.); see archive_objects().
KlassSubGraphInfo* HeapShared::_default_subgraph_info;
// Roots appended during dump time (append_root()); handed to
// ArchiveHeapWriter::write() at the end of archive_objects().
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
// Runtime root segments, each held via an OopHandle; see root_segment().
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments;
int HeapShared::_root_segment_max_size_elems;
// Scratch mirrors for the primitive types, indexed by BasicType.
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;
143
144 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
145 for (int i = 0; fields[i].valid(); i++) {
146 if (fields[i].klass == ik) {
147 return true;
148 }
149 }
150 return false;
151 }
152
153 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
154 return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
155 is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
156 }
157
205 vmSymbols::void_BuiltinClassLoader_signature(),
206 CHECK);
207 Handle boot_loader(THREAD, result.get_oop());
208 reset_states(boot_loader(), CHECK);
209 }
210
211 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
212
213 bool HeapShared::has_been_archived(oop obj) {
214 assert(CDSConfig::is_dumping_heap(), "dump-time only");
215 return archived_object_cache()->get(obj) != nullptr;
216 }
217
218 int HeapShared::append_root(oop obj) {
219 assert(CDSConfig::is_dumping_heap(), "dump-time only");
220
221 // No GC should happen since we aren't scanning _pending_roots.
222 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
223
224 if (_pending_roots == nullptr) {
225 _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
226 }
227
228 return _pending_roots->append(obj);
229 }
230
// Returns the segment_idx-th root segment (an objArray), resolved from its
// OopHandle. Returns nullptr at dump time if the heap cannot be written.
objArrayOop HeapShared::root_segment(int segment_idx) {
  if (CDSConfig::is_dumping_heap()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!HeapShared::can_write()) {
      return nullptr;
    }
  } else {
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}
245
// Splits a global root index into a (segment index, intra-segment index)
// pair, based on the fixed per-segment capacity _root_segment_max_size_elems.
void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
  assert(_root_segment_max_size_elems > 0, "sanity");

  // Try to avoid divisions for the common case.
  if (idx < _root_segment_max_size_elems) {
    seg_idx = 0;
    int_idx = idx;
  } else {
    seg_idx = idx / _root_segment_max_size_elems;
    int_idx = idx % _root_segment_max_size_elems;
  }

  assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
         "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}
261
262 // Returns an objArray that contains all the roots of the archived objects
263 oop HeapShared::get_root(int index, bool clear) {
264 assert(index >= 0, "sanity");
265 assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
281 get_segment_indexes(index, seg_idx, int_idx);
282 if (log_is_enabled(Debug, cds, heap)) {
283 oop old = root_segment(seg_idx)->obj_at(int_idx);
284 log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
285 }
286 root_segment(seg_idx)->obj_at_put(int_idx, nullptr);
287 }
288 }
289
290 bool HeapShared::archive_object(oop obj) {
291 assert(CDSConfig::is_dumping_heap(), "dump-time only");
292
293 assert(!obj->is_stackChunk(), "do not archive stack chunks");
294 if (has_been_archived(obj)) {
295 return true;
296 }
297
298 if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
299 log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
300 p2i(obj), obj->size());
301 return false;
302 } else {
303 count_allocation(obj->size());
304 ArchiveHeapWriter::add_source_obj(obj);
305 CachedOopInfo info = make_cached_oop_info(obj);
306 archived_object_cache()->put_when_absent(obj, info);
307 archived_object_cache()->maybe_grow();
308 mark_native_pointers(obj);
309
310 if (log_is_enabled(Debug, cds, heap)) {
311 ResourceMark rm;
312 log_debug(cds, heap)("Archived heap object " PTR_FORMAT " : %s",
313 p2i(obj), obj->klass()->external_name());
314 }
315
316 if (java_lang_Module::is_instance(obj) && Modules::check_archived_module_oop(obj)) {
317 Modules::update_oops_in_archived_module(obj, append_root(obj));
318 }
319
320 return true;
321 }
322 }
323
324 class MetaspaceObjToOopHandleTable: public ResourceHashtable<MetaspaceObj*, OopHandle,
325 36137, // prime number
326 AnyObj::C_HEAP,
327 mtClassShared> {
328 public:
329 oop get_oop(MetaspaceObj* ptr) {
330 MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
331 OopHandle* handle = get(ptr);
332 if (handle != nullptr) {
333 return handle->resolve();
335 return nullptr;
336 }
337 }
338 void set_oop(MetaspaceObj* ptr, oop o) {
339 MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
340 OopHandle handle(Universe::vm_global(), o);
341 bool is_new = put(ptr, handle);
342 assert(is_new, "cannot set twice");
343 }
344 void remove_oop(MetaspaceObj* ptr) {
345 MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
346 OopHandle* handle = get(ptr);
347 if (handle != nullptr) {
348 handle->release(Universe::vm_global());
349 remove(ptr);
350 }
351 }
352 };
353
// Records dest as the scratch resolved-references array for constant pool src.
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  _scratch_references_table->set_oop(src, dest);
}
357
// Returns the scratch resolved-references array recorded for src, or nullptr.
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_references_table->get_oop(src);
}
361
362 void HeapShared::init_scratch_objects(TRAPS) {
363 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
364 BasicType bt = (BasicType)i;
365 if (!is_reference_type(bt)) {
366 oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
367 _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
368 }
369 }
370 _scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
371 _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
372 }
373
// Returns the scratch mirror for a primitive type t.
oop HeapShared::scratch_java_mirror(BasicType t) {
  assert((uint)t < T_VOID+1, "range check");
  assert(!is_reference_type(t), "sanity");
  return _scratch_basic_type_mirrors[t].resolve();
}
379
// Returns the scratch mirror recorded for klass k, or nullptr if none.
oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_java_mirror_table->get_oop(k);
}
383
// Records mirror as the scratch mirror for klass k (must not be set twice).
void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  _scratch_java_mirror_table->set_oop(k, mirror);
}
387
388 void HeapShared::remove_scratch_objects(Klass* k) {
389 _scratch_java_mirror_table->remove_oop(k);
390 if (k->is_instance_klass()) {
391 _scratch_references_table->remove(InstanceKlass::cast(k)->constants());
392 }
393 }
394
395 void HeapShared::archive_java_mirrors() {
396 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
397 BasicType bt = (BasicType)i;
398 if (!is_reference_type(bt)) {
399 oop m = _scratch_basic_type_mirrors[i].resolve();
400 assert(m != nullptr, "sanity");
401 bool success = archive_reachable_objects_from(1, _default_subgraph_info, m);
402 assert(success, "sanity");
403
404 log_trace(cds, heap, mirror)(
405 "Archived %s mirror object from " PTR_FORMAT,
406 type2name(bt), p2i(m));
407
408 Universe::set_archived_basic_type_mirror_index(bt, append_root(m));
409 }
410 }
411
412 GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
413 assert(klasses != nullptr, "sanity");
414 for (int i = 0; i < klasses->length(); i++) {
415 Klass* orig_k = klasses->at(i);
416 oop m = scratch_java_mirror(orig_k);
417 if (m != nullptr) {
418 Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
419 bool success = archive_reachable_objects_from(1, _default_subgraph_info, m);
420 guarantee(success, "scratch mirrors must point to only archivable objects");
421 buffered_k->set_archived_java_mirror(append_root(m));
422 ResourceMark rm;
423 log_trace(cds, heap, mirror)(
424 "Archived %s mirror object from " PTR_FORMAT,
425 buffered_k->external_name(), p2i(m));
426
427 // archive the resolved_referenes array
428 if (buffered_k->is_instance_klass()) {
429 InstanceKlass* ik = InstanceKlass::cast(buffered_k);
430 oop rr = ik->constants()->prepare_resolved_references_for_archiving();
431 if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
432 bool success = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr);
433 assert(success, "must be");
434 int root_index = append_root(rr);
435 ik->constants()->cache()->set_archived_references(root_index);
// Builds the shared string table from the dumped interned strings and
// archives the array (and everything it references) as a root.
void HeapShared::archive_strings() {
  oop shared_strings_array = StringTable::init_shared_table(_dumped_interned_strings);
  bool success = archive_reachable_objects_from(1, _default_subgraph_info, shared_strings_array);
  // We must succeed because:
  // - _dumped_interned_strings do not contain any large strings.
  // - StringTable::init_shared_table() doesn't create any large arrays.
  assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
  StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
}
451
// Archives a preallocated exception instance and returns its root index.
int HeapShared::archive_exception_instance(oop exception) {
  bool success = archive_reachable_objects_from(1, _default_subgraph_info, exception);
  assert(success, "sanity");
  return append_root(exception);
}
457
// Marks the native (Klass*) pointer fields embedded in a java.lang.Class
// instance so ArchiveHeapWriter can relocate them; other object types have
// no native pointers marked here.
void HeapShared::mark_native_pointers(oop orig_obj) {
  if (java_lang_Class::is_instance(orig_obj)) {
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
  }
}
464
// Reports (via out-params) whether the archived src_obj contains oop
// pointers and/or native pointers. src_obj must already be in the cache.
void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  has_oop_pointers = info->has_oop_pointers();
  has_native_pointers = info->has_native_pointers();
}
471
// Flags the already-archived src_obj as containing native pointers.
void HeapShared::set_has_native_pointers(oop src_obj) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}
477
// Top-level dump-time entry point: collects all archivable heap objects and
// then writes them (with the pending roots) via ArchiveHeapWriter.
void HeapShared::archive_objects(ArchiveHeapInfo *heap_info) {
  {
    // Raw oops are cached below; no safepoint (and hence no GC) may occur
    // inside this scope.
    NoSafepointVerifier nsv;

    _default_subgraph_info = init_subgraph_info(vmClasses::Object_klass(), false);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    // The heap range is only printed for the configurations where it is
    // well-defined here: compressed oops, or G1's reserved region.
    if (UseCompressedOops || UseG1GC) {
      log_info(cds)("Heap range = [" PTR_FORMAT " - "  PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }
    copy_objects();

    CDSHeapVerifier::verify();
    check_default_subgraph_classes();
  }

  ArchiveHeapWriter::write(_pending_roots, heap_info);
}
502
// Archives every dumped interned string and pins its value against string
// deduplication.
void HeapShared::copy_interned_strings() {
  init_seen_objects_table();

  auto copier = [&] (oop s, bool value_ignored) {
    assert(s != nullptr, "sanity");
    // Large strings were filtered out before being added to the table.
    assert(!ArchiveHeapWriter::is_string_too_large_to_archive(s), "large strings must have been filtered");
    bool success = archive_reachable_objects_from(1, _default_subgraph_info, s);
    assert(success, "must be");
    // Prevent string deduplication from changing the value field to
    // something not in the archive.
    java_lang_String::set_deduplication_forbidden(s);
  };
  _dumped_interned_strings->iterate_all(copier);

  delete_seen_objects_table();
}
519
520 void HeapShared::copy_special_objects() {
// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  // All bookkeeping is done on the buffered (to-be-archived) copy of the klass.
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);

  if (_subgraph_object_klasses == nullptr) {
    _subgraph_object_klasses =
      new (mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_k), "must be a shared class");

  if (_k == buffered_k) {
    // Don't add the Klass containing the sub-graph to it's own klass
    // initialization list.
    return;
  }

  if (buffered_k->is_instance_klass()) {
    assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class(),
          "must be boot class");
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
    check_allowed_klass(InstanceKlass::cast(orig_k));
  } else if (buffered_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(buffered_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
            "must be boot class");
      check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
    }
    if (buffered_k == Universe::objectArrayKlass()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(buffered_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(buffered_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(buffered_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}
642
// Verifies that ik is allowed to appear in an archived subgraph: it must be
// in java.base (or, in non-product builds, a class loaded for
// -XX:ArchiveHeapTestClass). Otherwise the dump is aborted.
void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
  if (ik->module()->name() == vmSymbols::java_base()) {
    assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
    return;
  }

#ifndef PRODUCT
  if (!ik->module()->is_named() && ik->package() == nullptr) {
    // This class is loaded by ArchiveHeapTestClass
    return;
  }
  const char* extra_msg = ", or in an unnamed package of an unnamed module";
#else
  const char* extra_msg = "";
#endif

  ResourceMark rm;
  log_error(cds, heap)("Class %s not allowed in archive heap. Must be in java.base%s",
                       ik->external_name(), extra_msg);
  // Cannot continue the dump with a disallowed class in the subgraph.
  MetaspaceShared::unrecoverable_writing_error();
}
721 _subgraph_object_klasses =
722 ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
723 for (int i = 0; i < num_subgraphs_klasses; i++) {
724 Klass* subgraph_k = subgraph_object_klasses->at(i);
725 if (log_is_enabled(Info, cds, heap)) {
726 ResourceMark rm;
727 log_info(cds, heap)(
728 "Archived object klass %s (%2d) => %s",
729 _k->external_name(), i, subgraph_k->external_name());
730 }
731 _subgraph_object_klasses->at_put(i, subgraph_k);
732 ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
733 }
734 }
735
736 ArchivePtrMarker::mark_pointer(&_k);
737 ArchivePtrMarker::mark_pointer(&_entry_field_records);
738 ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
739 }
740
// Iteration functor: copies each dump-time KlassSubGraphInfo (that has any
// entry fields or object klasses) into an ArchivedKlassSubGraphInfoRecord in
// the RO region, and registers it with the compact-hashtable writer, keyed
// by the hash of the buffered Klass*.
struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    // Skip empty infos; only records with content are archived.
    if (info.subgraph_object_klasses() != nullptr || info.subgraph_entry_fields() != nullptr) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
      u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};
759
760 // Build the records of archived subgraph infos, which include:
761 // - Entry points to all subgraphs from the containing class mirror. The entry
762 // points are static fields in the mirror. For each entry point, the field
763 // offset, and value are recorded in the sub-graph
764 // info. The value is stored back to the corresponding field at runtime.
765 // - A list of klasses that need to be loaded/initialized before archived
766 // java object sub-graph can be accessed at runtime.
767 void HeapShared::write_subgraph_info_table() {
768 // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
769 DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
770 CompactHashtableStats stats;
771
772 _run_time_subgraph_info_table.reset();
773
774 CompactHashtableWriter writer(d_table->_count, &stats);
775 CopyKlassSubGraphInfoToArchive copy(&writer);
776 d_table->iterate(©);
777 writer.dump(&_run_time_subgraph_info_table, "subgraphs");
778
779 #ifndef PRODUCT
796 _root_segments = new GrowableArrayCHeap<OopHandle, mtClassShared>(10);
797 }
798 _root_segments->push(OopHandle(Universe::vm_global(), segment_oop));
799 }
800
// Records the per-segment capacity used by get_segment_indexes().
void HeapShared::init_root_segment_sizes(int max_size_elems) {
  _root_segment_max_size_elems = max_size_elems;
}
804
// Serializes (dump time) or deserializes (runtime) the shared tables:
// the run-time subgraph info table header and, in non-product builds, the
// -XX:ArchiveHeapTestClass name.
void HeapShared::serialize_tables(SerializeClosure* soc) {

#ifndef PRODUCT
  soc->do_ptr(&_archived_ArchiveHeapTestClass);
  if (soc->reading() && _archived_ArchiveHeapTestClass != nullptr) {
    // Re-establish the test class setup when reading the archive back in.
    _test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
    setup_test_class(_test_class_name);
  }
#endif

  _run_time_subgraph_info_table.serialize_header(soc);
}
817
// With -XX:VerifyArchivedFields>0, runs a VM heap verification pass around
// the initialization of k's archived static fields; with >1 (and after VM
// init completes) additionally forces a fully-verified GC.
static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 force a GC to happen in such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC,  true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}
844
845 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
846 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
847 //
848 // Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
849 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
850 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
851 void HeapShared::resolve_classes(JavaThread* current) {
852 assert(CDSConfig::is_using_archive(), "runtime only!");
853 if (!ArchiveHeapLoader::is_in_use()) {
854 return; // nothing to do
855 }
856 resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
857 resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
858 }
859
860 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
861 for (int i = 0; fields[i].valid(); i++) {
862 ArchivableStaticFieldInfo* info = &fields[i];
863 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
864 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
865 assert(k != nullptr && k->is_shared_boot_class(), "sanity");
866 resolve_classes_for_subgraph_of(current, k);
867 }
868 }
869
// Resolves (do_init=false) the classes of k's archived subgraph. Any pending
// exception is swallowed; if the record could not be loaded, the subgraph's
// archived roots are cleared so they won't be used.
void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
   resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
   CLEAR_PENDING_EXCEPTION;
  }
  if (record == nullptr) {
   clear_archived_roots_of(k);
  }
}
882
883 void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
884 JavaThread* THREAD = current;
885 if (!ArchiveHeapLoader::is_in_use()) {
886 return; // nothing to do
887 }
888
889 if (k->name()->equals("jdk/internal/module/ArchivedModuleGraph") &&
890 !CDSConfig::is_using_optimized_module_handling() &&
891 // archive was created with --module-path
892 ClassLoaderExt::num_module_paths() > 0) {
893 // ArchivedModuleGraph was created with a --module-path that's different than the runtime --module-path.
894 // Thus, it might contain references to modules that do not exist at runtime. We cannot use it.
895 log_info(cds, heap)("Skip initializing ArchivedModuleGraph subgraph: is_using_optimized_module_handling=%s num_module_paths=%d",
896 BOOL_TO_STR(CDSConfig::is_using_optimized_module_handling()), ClassLoaderExt::num_module_paths());
897 return;
898 }
899
900 ExceptionMark em(THREAD);
901 const ArchivedKlassSubGraphInfoRecord* record =
902 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
966
967 resolve_or_init(k, do_init, CHECK_NULL);
968
969 // Load/link/initialize the klasses of the objects in the subgraph.
970 // nullptr class loader is used.
971 Array<Klass*>* klasses = record->subgraph_object_klasses();
972 if (klasses != nullptr) {
973 for (int i = 0; i < klasses->length(); i++) {
974 Klass* klass = klasses->at(i);
975 if (!klass->is_shared()) {
976 return nullptr;
977 }
978 resolve_or_init(klass, do_init, CHECK_NULL);
979 }
980 }
981 }
982
983 return record;
984 }
985
986 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
987 if (!do_init) {
988 if (k->class_loader_data() == nullptr) {
989 Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
990 assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
991 }
992 } else {
993 assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
994 if (k->is_instance_klass()) {
995 InstanceKlass* ik = InstanceKlass::cast(k);
996 ik->initialize(CHECK);
997 } else if (k->is_objArray_klass()) {
998 ObjArrayKlass* oak = ObjArrayKlass::cast(k);
999 oak->initialize(CHECK);
1000 }
1001 }
1002 }
1003
1004 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1005 verify_the_heap(k, "before");
1113
1114 template <class T> void check(T *p) {
1115 _result |= (HeapAccess<>::oop_load(p) != nullptr);
1116 }
1117
1118 public:
1119 PointsToOopsChecker() : _result(false) {}
1120 void do_oop(narrowOop *p) { check(p); }
1121 void do_oop( oop *p) { check(p); }
1122 bool result() { return _result; }
1123 };
1124
1125 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj) {
1126 WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
1127 oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
1128 PointsToOopsChecker points_to_oops_checker;
1129 obj->oop_iterate(&points_to_oops_checker);
1130 return CachedOopInfo(referrer, points_to_oops_checker.result());
1131 }
1132
// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all  objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all orig_obj and all reachable objects.
// Returns false only when a level-1 (subgraph root) object is too large to
// archive; aborts the dump for unsupported or unexpectedly-unarchivable
// objects deeper in the graph.
bool HeapShared::archive_reachable_objects_from(int level,
                                                KlassSubGraphInfo* subgraph_info,
                                                oop orig_obj) {
  assert(orig_obj != nullptr, "must be");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    if (log_is_enabled(Trace, cds, heap)) {
      WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
      if (walker != nullptr) {
        LogStream ls(Log(cds, heap)::trace());
        CDSHeapVerifier::trace_to_root(&ls, walker->referencing_obj());
      }
    }
    MetaspaceShared::unrecoverable_writing_error();
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj) && subgraph_info != _default_subgraph_info) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    MetaspaceShared::unrecoverable_writing_error();
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return true;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool already_archived = has_been_archived(orig_obj);
  bool record_klasses_only = already_archived;
  if (!already_archived) {
    ++_num_new_archived_objs;
    if (!archive_object(orig_obj)) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archives static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return false;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        MetaspaceShared::unrecoverable_writing_error();
      }
    }
  }

  Klass *orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  // Recursively archive everything orig_obj references.
  WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj);
  orig_obj->oop_iterate(&walker);

  if (CDSEnumKlass::is_enum_obj(orig_obj)) {
    CDSEnumKlass::handle_enum_obj(level + 1, subgraph_info, orig_obj);
  }
  return true;
}
1210
1211 //
1212 // Start from the given static field in a java mirror and archive the
1213 // complete sub-graph of java heap objects that are reached directly
1214 // or indirectly from the starting object by following references.
1215 // Sub-graph archiving restrictions (current):
1216 //
1217 // - All classes of objects in the archived sub-graph (including the
1218 // entry class) must be boot class only.
1219 // - No java.lang.Class instance (java mirror) can be included inside
1220 // an archived sub-graph. Mirror can only be the sub-graph entry object.
1221 //
1222 // The Java heap object sub-graph archiving process (see
1223 // WalkOopAndArchiveClosure):
1224 //
1225 // 1) Java object sub-graph archiving starts from a given static field
1226 // within a Class instance (java mirror). If the static field is a
1227 // reference field and points to a non-null java object, proceed to
1305 if (!CompressedOops::is_null(f)) {
1306 verify_subgraph_from(f);
1307 }
1308 }
1309
1310 void HeapShared::verify_subgraph_from(oop orig_obj) {
1311 if (!has_been_archived(orig_obj)) {
1312 // It's OK for the root of a subgraph to be not archived. See comments in
1313 // archive_reachable_objects_from().
1314 return;
1315 }
1316
1317 // Verify that all objects reachable from orig_obj are archived.
1318 init_seen_objects_table();
1319 verify_reachable_objects_from(orig_obj);
1320 delete_seen_objects_table();
1321 }
1322
1323 void HeapShared::verify_reachable_objects_from(oop obj) {
1324 _num_total_verifications ++;
1325 if (!has_been_seen_during_subgraph_recording(obj)) {
1326 set_has_been_seen_during_subgraph_recording(obj);
1327 assert(has_been_archived(obj), "must be");
1328 VerifySharedOopClosure walker;
1329 obj->oop_iterate(&walker);
1330 }
1331 }
1332 #endif
1333
// The "default subgraph" contains special objects (see heapShared.hpp) that
// can be accessed before we load any Java classes (including java/lang/Class).
// Make sure that these are only instances of the very few specific types
// that we can handle.
void HeapShared::check_default_subgraph_classes() {
  GrowableArray<Klass*>* klasses = _default_subgraph_info->subgraph_object_klasses();
  int num = klasses->length();
  for (int i = 0; i < num; i++) {
    Klass* subgraph_k = klasses->at(i);
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm; // external_name() allocates in the resource area
      log_info(cds, heap)(
          "Archived object klass (default subgraph %d) => %s",
          i, subgraph_k->external_name());
    }

    // Translate the klass name back to its source-space Symbol so it can be
    // compared by identity against the vmSymbols below.
    Symbol* name = ArchiveBuilder::current()->get_source_addr(subgraph_k->name());
    guarantee(name == vmSymbols::java_lang_Class() ||
              name == vmSymbols::java_lang_String() ||
              name == vmSymbols::java_lang_ArithmeticException() ||
              name == vmSymbols::java_lang_NullPointerException() ||
              name == vmSymbols::java_lang_InternalError() ||
              name == vmSymbols::java_lang_ArrayIndexOutOfBoundsException() ||
              name == vmSymbols::java_lang_ArrayStoreException() ||
              name == vmSymbols::java_lang_ClassCastException() ||
              name == vmSymbols::object_array_signature() ||
              name == vmSymbols::byte_array_signature() ||
              name == vmSymbols::char_array_signature(),
              "default subgraph can have only these objects");
  }
}
1365
// Tracks objects already visited during a single recording/verification pass.
HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
// Counters for the current recording pass (presumably reset per subgraph
// recording — the reset site is outside this chunk; confirm).
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

// Cumulative statistics; reset at the top of archive_object_subgraphs().
int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;
1376
// Returns true if obj has already been visited in the current pass, i.e. it
// has an entry in _seen_objects_table.
bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != nullptr;
}
1380
1381 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
1382 assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
1565 return false;
1566 }
1567
1568 // See KlassSubGraphInfo::check_allowed_klass() - only two types of
1569 // classes are allowed:
1570 // (A) java.base classes (which must not be in the unnamed module)
1571 // (B) test classes which must be in the unnamed package of the unnamed module.
1572 // So if we see a '/' character in the class name, it must be in (A);
1573 // otherwise it must be in (B).
1574 if (name->index_of_at(0, "/", 1) >= 0) {
1575 return false; // (A)
1576 }
1577
1578 return true; // (B)
1579 }
1580 }
1581 }
1582
1583 return false;
1584 }
1585 #endif
1586
1587 void HeapShared::init_for_dumping(TRAPS) {
1588 if (HeapShared::can_write()) {
1589 setup_test_class(ArchiveHeapTestClass);
1590 _dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
1591 init_subgraph_entry_fields(CHECK);
1592 }
1593 }
1594
// Archives the subgraphs rooted at every entry in the given nullptr-terminated
// fields[] table, then logs (and in non-product builds verifies) the result.
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the class of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  // NOTE: the outer loop deliberately has no i++ — the inner loop advances i
  // past the run of consecutive entries that share the same klass.
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break; // start of the next klass's run
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records = %d",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  // Re-walk every entry field and assert that everything reachable from an
  // archived root really was archived.
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}
|
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/aotClassInitializer.hpp"
27 #include "cds/archiveBuilder.hpp"
28 #include "cds/archiveHeapLoader.hpp"
29 #include "cds/archiveHeapWriter.hpp"
30 #include "cds/archiveUtils.hpp"
31 #include "cds/cdsAccess.hpp"
32 #include "cds/cdsConfig.hpp"
33 #include "cds/cdsEnumKlass.hpp"
34 #include "cds/cdsHeapVerifier.hpp"
35 #include "cds/heapShared.hpp"
36 #include "cds/metaspaceShared.hpp"
37 #include "classfile/classLoaderData.hpp"
38 #include "classfile/classLoaderExt.hpp"
39 #include "classfile/javaClasses.inline.hpp"
40 #include "classfile/modules.hpp"
41 #include "classfile/stringTable.hpp"
42 #include "classfile/symbolTable.hpp"
43 #include "classfile/systemDictionary.hpp"
44 #include "classfile/systemDictionaryShared.hpp"
45 #include "classfile/vmClasses.hpp"
46 #include "classfile/vmSymbols.hpp"
47 #include "gc/shared/collectedHeap.hpp"
48 #include "gc/shared/gcLocker.hpp"
49 #include "gc/shared/gcVMOperations.hpp"
50 #include "logging/log.hpp"
51 #include "logging/logStream.hpp"
70 #include "gc/g1/g1CollectedHeap.hpp"
71 #endif
72
73 #if INCLUDE_CDS_JAVA_HEAP
74
// Describes one static field whose value is the root of an archived object
// subgraph. Tables of these entries are terminated by a {nullptr, nullptr}
// entry (see valid()).
struct ArchivableStaticFieldInfo {
  const char* klass_name;  // holder class name, slash-separated
  const char* field_name;  // name of the static field
  InstanceKlass* klass;    // resolved holder; nullptr until initialized elsewhere
  int offset;              // field offset (0 until initialized elsewhere)
  BasicType type;          // field type (T_ILLEGAL until initialized elsewhere)

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  // A table entry is valid unless it is the nullptr terminator.
  bool valid() {
    return klass_name != nullptr;
  }
};
89
// RAII helper: pushes obj onto the _trace stack for the duration of a scope,
// keeping a record of the chain of objects currently being archived
// (presumably consumed by debug_trace() for diagnostics — confirm).
class HeapShared::ArchivingObjectMark : public StackObj {
public:
  ArchivingObjectMark(oop obj) {
    _trace->push(obj);
  }
  ~ArchivingObjectMark() {
    _trace->pop();
  }
};
99
// RAII helper: pushes a context string onto the _context stack for the
// duration of a scope. NOTE(review): the embedded ResourceMark presumably
// bounds resource-area allocations made while this context is active —
// confirm against the callers.
class HeapShared::ContextMark : public StackObj {
  ResourceMark rm;
public:
  ContextMark(const char* c) : rm{} {
    _context->push(c);
  }
  ~ContextMark() {
    _context->pop();
  }
};
110
bool HeapShared::_disable_writing = false;
// Interned strings collected at dump time (see init_for_dumping()).
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

// Allocation statistics updated by count_allocation() during archiving.
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
// Support for the -XX:ArchiveHeapTestClass testing flag (non-product only).
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif
126
127
128 //
129 // If you add new entries to the following tables, you should know what you're doing!
130 //
131
132 static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
133 {"java/lang/Integer$IntegerCache", "archivedCache"},
134 {"java/lang/Long$LongCache", "archivedCache"},
135 {"java/lang/Byte$ByteCache", "archivedCache"},
136 {"java/lang/Short$ShortCache", "archivedCache"},
137 {"java/lang/Character$CharacterCache", "archivedCache"},
138 {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
139 {"sun/util/locale/BaseLocale", "constantBaseLocales"},
140 {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
141 {"java/util/ImmutableCollections", "archivedObjects"},
142 {"java/lang/ModuleLayer", "EMPTY_LAYER"},
143 {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
144 {"jdk/internal/math/FDBigInteger", "archivedCaches"},
145 {"java/lang/reflect/Proxy$ProxyBuilder", "archivedData"}, // FIXME -- requires AOTClassLinking
146
147 #ifndef PRODUCT
148 {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
149 #endif
150 {nullptr, nullptr},
151 };
152
// full module graph
// Static fields archived only when the full module graph is archived;
// nullptr-terminated like archive_subgraph_entry_fields[] above.
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders",    "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS,                     ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData",               "archivedData"},
  {nullptr, nullptr},
};
160
// Dump-time and runtime records for the "default subgraph" (see heapShared.hpp).
KlassSubGraphInfo* HeapShared::_default_subgraph_info;
ArchivedKlassSubGraphInfoRecord* HeapShared::_runtime_default_subgraph_info;
// Roots collected during dumping (see append_root()); created lazily.
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
// Stacks backing ArchivingObjectMark / ContextMark.
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_trace = nullptr;
GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
// Root segments and their size, used by root_segment()/get_segment_indexes().
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments;
int HeapShared::_root_segment_max_size_elems;
// Scratch copies of mirrors/resolved-references used while dumping.
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;
171
172 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
173 for (int i = 0; fields[i].valid(); i++) {
174 if (fields[i].klass == ik) {
175 return true;
176 }
177 }
178 return false;
179 }
180
181 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
182 return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
183 is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
184 }
185
233 vmSymbols::void_BuiltinClassLoader_signature(),
234 CHECK);
235 Handle boot_loader(THREAD, result.get_oop());
236 reset_states(boot_loader(), CHECK);
237 }
238
239 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
240
241 bool HeapShared::has_been_archived(oop obj) {
242 assert(CDSConfig::is_dumping_heap(), "dump-time only");
243 return archived_object_cache()->get(obj) != nullptr;
244 }
245
// Registers obj as an archived-heap root and returns its root index, which
// can be stored into the archive and resolved at runtime via get_root().
int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");

  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == nullptr) {
    _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
  }

  // Hold the root in a global OopHandle so it stays alive across GCs.
  OopHandle oh(Universe::vm_global(), obj);
  return _pending_roots->append(oh);
}
259
// Returns the objArray holding the roots of segment segment_idx. Valid both
// while dumping (VM thread only) and at runtime when an archive is in use.
objArrayOop HeapShared::root_segment(int segment_idx) {
  if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!HeapShared::can_write()) {
      return nullptr; // no heap objects are being written into this archive
    }
  } else {
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}
274
275 inline unsigned int oop_handle_hash(const OopHandle& oh) {
276 oop o = oh.resolve();
277 if (o == nullptr) {
278 return 0;
279 } else {
280 return o->identity_hash();
281 }
282 }
283
284 inline bool oop_handle_equals(const OopHandle& a, const OopHandle& b) {
285 return a.resolve() == b.resolve();
286 }
287
// Maps an original heap object to its "scratch" (dump-time) counterpart.
// Keys and values are held as OopHandles so entries stay valid across GCs.
class OrigToScratchObjectTable: public ResourceHashtable<OopHandle, OopHandle,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

// Created lazily by track_scratch_object(); guarded by ArchivedObjectTables_lock.
static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;
296
// Records the orig -> scratch association for an object, creating the table
// on first use. Both oops are wrapped in global OopHandles for GC safety.
void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
  if (_orig_to_scratch_object_table == nullptr) {
    _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
  }

  OopHandle orig_h(Universe::vm_global(), orig_obj);
  OopHandle scratch_h(Universe::vm_global(), scratch_obj);
  _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
}
307
308 oop HeapShared::orig_to_scratch_object(oop orig_obj) {
309 MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
310 if (_orig_to_scratch_object_table != nullptr) {
311 OopHandle orig(&orig_obj);
312 OopHandle* v = _orig_to_scratch_object_table->get(orig);
313 if (v != nullptr) {
314 return v->resolve();
315 }
316 }
317 return nullptr;
318 }
319
// Permanent oops are used to support AOT-compiled methods, which may have in-line references
// to Strings and MH oops.
//
// At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
// and are accessed via CDSAccess::get_archived_object(int).
struct PermanentOopInfo {
  int _index;       // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
  int _heap_offset; // Offset of the object from the bottom of the archived heap.
  PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
};
330
// Maps each archived heap object (by OopHandle) to its PermanentOopInfo.
class PermanentOopTable: public ResourceHashtable<OopHandle, PermanentOopInfo,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

// Number of permanent indices assigned so far (dump time).
static int _dumptime_permanent_oop_count = 0;
// Created lazily by add_to_permanent_oop_table() (dump time).
static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
// Populated by CachedCodeDirectoryInternal::runtime_init_internal() (runtime).
static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;
341
// ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
// so we can remember their offset (from the bottom of the archived heap).
void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
  assert_at_safepoint();
  if (_dumptime_permanent_oop_table == nullptr) {
    _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
  }

  // Index -1 means "no permanent index assigned yet"; see
  // get_archived_object_permanent_index() for lazy assignment.
  PermanentOopInfo info(-1, offset);
  OopHandle oh(Universe::vm_global(), obj);
  _dumptime_permanent_oop_table->put_when_absent(oh, info);
}
354
// A permanent index is assigned to an archived object ONLY when
// the AOT compiler calls this function.
// Returns -1 if obj is not an archived heap object (or heap dumping is off).
int HeapShared::get_archived_object_permanent_index(oop obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);

  if (!CDSConfig::is_dumping_heap()) {
    return -1; // Called by the Leyden old workflow
  }
  if (_dumptime_permanent_oop_table == nullptr) {
    return -1;
  }

  // The permanent-oop table is keyed by scratch objects, so translate obj
  // to its scratch counterpart if one exists.
  if (_orig_to_scratch_object_table != nullptr) {
    OopHandle orig(&obj);
    OopHandle* v = _orig_to_scratch_object_table->get(orig);
    if (v != nullptr) {
      obj = v->resolve();
    }
  }

  OopHandle tmp(&obj);
  PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
  if (info == nullptr) {
    return -1;
  } else {
    // Lazily assign the next consecutive permanent index on first request.
    if (info->_index < 0) {
      info->_index = _dumptime_permanent_oop_count++;
    }
    return info->_index;
  }
}
386
// Runtime lookup: returns the archived heap object with the given permanent
// index, as recorded by CachedCodeDirectoryInternal::runtime_init_internal().
oop HeapShared::get_archived_object(int permanent_index) {
  assert(permanent_index >= 0, "sanity");
  assert(ArchiveHeapLoader::is_in_use(), "sanity");
  assert(_runtime_permanent_oops != nullptr, "sanity");

  oop obj = _runtime_permanent_oops->at(permanent_index).resolve();
  log_info(cds)("GET perm obj %d = %p", permanent_index, cast_from_oop<void*>(obj));
  if (obj != nullptr) {
    log_info(cds)("GET perm obj %d class = %p", permanent_index, obj->klass());
    log_info(cds)("GET perm obj %d class = %s", permanent_index, obj->klass()->external_name());
  }
  return obj;
}
400
401 // Remember all archived heap objects that have a permanent index.
402 // table[i] = offset of oop whose permanent index is i.
403 void CachedCodeDirectoryInternal::dumptime_init_internal() {
404 const int count = _dumptime_permanent_oop_count;
405 int* table = (int*)CDSAccess::allocate_from_code_cache(count * sizeof(int));
406 for (int i = 0; i < count; i++) {
407 table[count] = -1;
408 }
409 _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
410 int index = info._index;
411 if (index >= 0) {
412 assert(index < count, "sanity");
413 table[index] = info._heap_offset;
414 }
415 return true; // continue
416 });
417
418 for (int i = 0; i < count; i++) {
419 assert(table[i] >= 0, "must be");
420 }
421
422 log_info(cds)("Dumped %d permanent oops", count);
423
424 _permanent_oop_count = count;
425 CDSAccess::set_pointer(&_permanent_oop_offsets, table);
426 }
427
428 // This is called during the bootstrap of the production run, before any GC can happen.
429 // Record each permanent oop in a OopHandle for GC safety.
430 void CachedCodeDirectoryInternal::runtime_init_internal() {
431 int count = _permanent_oop_count;
432 int* table = _permanent_oop_offsets;
433 _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
434 for (int i = 0; i < count; i++) {
435 oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
436 OopHandle oh(Universe::vm_global(), obj);
437 _runtime_permanent_oops->append(oh);
438
439 ResourceMark rm;
440 log_info(cds)("perm obj %d = %p", i, cast_from_oop<void*>(obj));
441 if (obj != nullptr) {
442 log_info(cds)("perm obj %d class = %p", i, obj->klass());
443 log_info(cds)("perm obj %d class = %s", i, obj->klass()->external_name());
444 }
445 }
446 };
447
// Splits a flat root index into a (segment, within-segment) pair based on
// the fixed per-segment capacity _root_segment_max_size_elems.
void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
  assert(_root_segment_max_size_elems > 0, "sanity");

  // Try to avoid divisions for the common case.
  if (idx < _root_segment_max_size_elems) {
    seg_idx = 0;
    int_idx = idx;
  } else {
    seg_idx = idx / _root_segment_max_size_elems;
    int_idx = idx % _root_segment_max_size_elems;
  }

  assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
         "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}
463
464 // Returns an objArray that contains all the roots of the archived objects
465 oop HeapShared::get_root(int index, bool clear) {
466 assert(index >= 0, "sanity");
467 assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
483 get_segment_indexes(index, seg_idx, int_idx);
484 if (log_is_enabled(Debug, cds, heap)) {
485 oop old = root_segment(seg_idx)->obj_at(int_idx);
486 log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
487 }
488 root_segment(seg_idx)->obj_at_put(int_idx, nullptr);
489 }
490 }
491
// Selects obj for inclusion in the archived heap: records it in the
// archived-object cache and hands it to ArchiveHeapWriter. Returns false
// only when obj is too large to archive; returns true if obj is newly
// added or was already archived.
bool HeapShared::archive_object(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");
  if (has_been_archived(obj)) {
    return true; // already in the cache; nothing to do
  }

  if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), obj->size());
    debug_trace();
    return false;
  } else {
    count_allocation(obj->size());
    ArchiveHeapWriter::add_source_obj(obj);
    CachedOopInfo info = make_cached_oop_info(obj);
    archived_object_cache()->put_when_absent(obj, info);
    archived_object_cache()->maybe_grow();
    mark_native_pointers(obj);

    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      LogTarget(Debug, cds, heap) log;
      LogStream out(log);
      out.print("Archived heap object " PTR_FORMAT " : %s ",
                p2i(obj), obj->klass()->external_name());
      if (java_lang_Class::is_instance(obj)) {
        // For mirrors, also log the represented class (or "primitive").
        Klass* k = java_lang_Class::as_Klass(obj);
        if (k != nullptr) {
          out.print("%s", k->external_name());
        } else {
          out.print("primitive");
        }
      }
      out.cr();
    }

    // Archived module oops need extra fix-up; record this one as a root.
    if (java_lang_Module::is_instance(obj) && Modules::check_archived_module_oop(obj)) {
      Modules::update_oops_in_archived_module(obj, append_root(obj));
    }

    return true;
  }
}
537
538 class MetaspaceObjToOopHandleTable: public ResourceHashtable<MetaspaceObj*, OopHandle,
539 36137, // prime number
540 AnyObj::C_HEAP,
541 mtClassShared> {
542 public:
543 oop get_oop(MetaspaceObj* ptr) {
544 MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
545 OopHandle* handle = get(ptr);
546 if (handle != nullptr) {
547 return handle->resolve();
549 return nullptr;
550 }
551 }
552 void set_oop(MetaspaceObj* ptr, oop o) {
553 MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
554 OopHandle handle(Universe::vm_global(), o);
555 bool is_new = put(ptr, handle);
556 assert(is_new, "cannot set twice");
557 }
558 void remove_oop(MetaspaceObj* ptr) {
559 MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
560 OopHandle* handle = get(ptr);
561 if (handle != nullptr) {
562 handle->release(Universe::vm_global());
563 remove(ptr);
564 }
565 }
566 };
567
568 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
569 if (_scratch_references_table == nullptr) {
570 _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
571 }
572 _scratch_references_table->set_oop(src, dest);
573 }
574
575 objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
576 return (objArrayOop)_scratch_references_table->get_oop(src);
577 }
578
// Creates the scratch mirrors for all primitive types and the tables that
// hold scratch mirrors / resolved references for classes.
void HeapShared::init_scratch_objects(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      // Create a fresh mirror for this primitive type and remember the
      // orig -> scratch association.
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
      track_scratch_object(Universe::java_mirror(bt), m);
    }
  }
  _scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
  // May already exist if add_scratch_resolved_references() ran first.
  if (_scratch_references_table == nullptr) {
    _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
  }
}
593
// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that java_mirror itself will be returned if it's already a
// scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
oop HeapShared::scratch_java_mirror(oop java_mirror) {
  assert(java_lang_Class::is_instance(java_mirror), "must be");

  // If java_mirror is already one of the scratch basic-type mirrors,
  // return it as-is.
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
        return java_mirror;
      }
    }
  }

  // Otherwise, look up the scratch mirror by the type it represents.
  if (java_lang_Class::is_primitive(java_mirror)) {
    return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
  } else {
    return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
  }
}
618
619 oop HeapShared::scratch_java_mirror(BasicType t) {
620 assert((uint)t < T_VOID+1, "range check");
621 assert(!is_reference_type(t), "sanity");
622 return _scratch_basic_type_mirrors[t].resolve();
623 }
624
// Returns the scratch mirror recorded for k via set_scratch_java_mirror()
// (may be null if none was recorded).
oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_java_mirror_table->get_oop(k);
}
628
// Records mirror as k's scratch mirror, and tracks the association from
// k's original mirror as well.
void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  track_scratch_object(k->java_mirror(), mirror);
  _scratch_java_mirror_table->set_oop(k, mirror);
}
633
634 void HeapShared::remove_scratch_objects(Klass* k) {
635 _scratch_java_mirror_table->remove_oop(k);
636 if (k->is_instance_klass()) {
637 _scratch_references_table->remove(InstanceKlass::cast(k)->constants());
638 }
639 oop mirror = k->java_mirror();
640 if (mirror != nullptr) {
641 OopHandle tmp(&mirror);
642 OopHandle* v = _orig_to_scratch_object_table->get(tmp);
643 if (v != nullptr) {
644 oop scratch_mirror = v->resolve();
645 java_lang_Class::set_klass(scratch_mirror, nullptr);
646 _orig_to_scratch_object_table->remove(tmp);
647 }
648 }
649 }
650
651 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
652 return ik->is_hidden() &&
653 (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
654 ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
655 ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+"));
656 }
657
// Returns true if ik is a hidden class whose name contains the "$$Lambda+"
// infix used for lambda proxy classes (the > 0 check requires a non-empty
// prefix before the infix).
bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
  return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
}
661
662 bool HeapShared::is_archivable_hidden_klass(InstanceKlass* ik) {
663 return CDSConfig::is_dumping_invokedynamic() && (is_lambda_form_klass(ik) || is_lambda_proxy_klass(ik));
664 }
665
666 void HeapShared::copy_aot_initialized_mirror(Klass* orig_k, oop orig_mirror, oop m) {
667 if (!orig_k->is_instance_klass()) {
668 return;
669 }
670 InstanceKlass* ik = InstanceKlass::cast(orig_k);
671
672 if (HeapShared::is_archivable_hidden_klass(ik)) {
673 // We can't rerun the <clinit> method of hidden classes as we don't save
674 // the classData, so we must archive its mirror in initialized state.
675 assert(ik->is_initialized(), "must be");
676 }
677
678 if (!ik->is_initialized() || !AOTClassInitializer::can_archive_initialized_mirror(ik)) {
679 return;
680 }
681
682 int nfields = 0;
683 for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
684 if (fs.access_flags().is_static()) {
685 fieldDescriptor& fd = fs.field_descriptor();
686 int offset = fd.offset();
687 switch (fd.field_type()) {
688 case T_OBJECT:
689 case T_ARRAY:
690 m->obj_field_put(offset, orig_mirror->obj_field(offset));
691 break;
692 case T_BOOLEAN:
693 m->bool_field_put(offset, orig_mirror->bool_field(offset));
694 break;
695 case T_BYTE:
696 m->byte_field_put(offset, orig_mirror->byte_field(offset));
697 break;
698 case T_SHORT:
699 m->short_field_put(offset, orig_mirror->short_field(offset));
700 break;
701 case T_CHAR:
702 m->char_field_put(offset, orig_mirror->char_field(offset));
703 break;
704 case T_INT:
705 m->int_field_put(offset, orig_mirror->int_field(offset));
706 break;
707 case T_LONG:
708 m->long_field_put(offset, orig_mirror->long_field(offset));
709 break;
710 case T_FLOAT:
711 m->float_field_put(offset, orig_mirror->float_field(offset));
712 break;
713 case T_DOUBLE:
714 m->double_field_put(offset, orig_mirror->double_field(offset));
715 break;
716 default:
717 ShouldNotReachHere();
718 }
719 nfields ++;
720 }
721 }
722
723 java_lang_Class::set_class_data(m, java_lang_Class::class_data(orig_mirror));
724
725 // Class::reflectData use SoftReference, which cannot be archived. Set it
726 // to null and it will be recreated at runtime.
727 java_lang_Class::set_reflection_data(m, nullptr);
728
729 if (log_is_enabled(Info, cds, init)) {
730 ResourceMark rm;
731 log_debug(cds, init)("copied %3d field(s) in preinitialized mirror %s%s", nfields, ik->external_name(),
732 ik->is_hidden() ? " (hidden)" : "");
733 }
734
735 InstanceKlass* buffered_ik = ArchiveBuilder::current()->get_buffered_addr(ik);
736 buffered_ik->set_has_preinitialized_mirror();
737 }
738
// Gives the scratch mirror the same identity hash as the original mirror by
// installing the hash directly into its (unlocked) mark word, so hash-based
// lookups behave identically at runtime.
static void copy_java_mirror_hashcode(oop orig_mirror, oop scratch_m) {
  int src_hash = orig_mirror->identity_hash();
  scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
  assert(scratch_m->mark().is_unlocked(), "sanity");

  DEBUG_ONLY(int archived_hash = scratch_m->identity_hash());
  assert(src_hash == archived_hash, "Java mirror wrong hash: original %x, scratch %x", src_hash, archived_hash);
}
747
748 void HeapShared::archive_java_mirrors() {
749 AOTClassInitializer::reset_preinit_check();
750
751 _orig_to_scratch_object_table->iterate([&](OopHandle o, OopHandle s) {
752 copy_java_mirror_hashcode(o.resolve(), s.resolve());
753 return true;
754 });
755
756 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
757 BasicType bt = (BasicType)i;
758 if (!is_reference_type(bt)) {
759 oop orig_mirror = Universe::java_mirror(bt);
760 oop m = _scratch_basic_type_mirrors[i].resolve();
761 assert(m != nullptr, "sanity");
762 bool success = archive_reachable_objects_from(1, _default_subgraph_info, m);
763 assert(success, "sanity");
764
765 log_trace(cds, heap, mirror)(
766 "Archived %s mirror object from " PTR_FORMAT,
767 type2name(bt), p2i(m));
768
769 Universe::set_archived_basic_type_mirror_index(bt, append_root(m));
770 }
771 }
772
773 GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
774 assert(klasses != nullptr, "sanity");
775
776 for (int i = 0; i < klasses->length(); i++) {
777 Klass* orig_k = klasses->at(i);
778 oop orig_mirror = orig_k->java_mirror();
779 oop m = scratch_java_mirror(orig_k);
780 if (m != nullptr) {
781 copy_aot_initialized_mirror(orig_k, orig_mirror, m);
782 if (CDSConfig::is_dumping_reflection_data() && java_lang_Class::has_reflection_data(orig_mirror)) {
783 oop reflection_data = java_lang_Class::reflection_data(orig_mirror);
784 bool success = archive_reachable_objects_from(1, _default_subgraph_info, reflection_data);
785 guarantee(success, "");
786 java_lang_Class::set_reflection_data(m, reflection_data);
787 }
788 }
789 }
790
791 for (int i = 0; i < klasses->length(); i++) {
792 Klass* orig_k = klasses->at(i);
793 oop orig_mirror = orig_k->java_mirror();
794 oop m = scratch_java_mirror(orig_k);
795 if (m != nullptr) {
796 Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
797 bool success = archive_reachable_objects_from(1, _default_subgraph_info, m);
798 guarantee(success, "scratch mirrors must point to only archivable objects");
799 buffered_k->set_archived_java_mirror(append_root(m));
800 ResourceMark rm;
801 log_trace(cds, heap, mirror)(
802 "Archived %s mirror object from " PTR_FORMAT,
803 buffered_k->external_name(), p2i(m));
804
      // archive the resolved_references array
806 if (buffered_k->is_instance_klass()) {
807 InstanceKlass* ik = InstanceKlass::cast(buffered_k);
808 oop rr = ik->constants()->prepare_resolved_references_for_archiving();
809 if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
810 bool success = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr);
811 assert(success, "must be");
812 int root_index = append_root(rr);
813 ik->constants()->cache()->set_archived_references(root_index);
// Archive all interned strings collected in _dumped_interned_strings, plus
// the shared-strings array that the runtime StringTable uses to locate them.
void HeapShared::archive_strings() {
  oop shared_strings_array = StringTable::init_shared_table(_dumped_interned_strings);
  bool success = archive_reachable_objects_from(1, _default_subgraph_info, shared_strings_array);
  // We must succeed because:
  // - _dumped_interned_strings do not contain any large strings.
  // - StringTable::init_shared_table() doesn't create any large arrays.
  assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
  // Record the array as a heap root so the runtime can find it.
  StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
}
829
830 int HeapShared::archive_exception_instance(oop exception) {
831 bool success = archive_reachable_objects_from(1, _default_subgraph_info, exception);
832 assert(success, "sanity");
833 return append_root(exception);
834 }
835
// Tell ArchiveHeapWriter which fields of orig_obj hold native (Metadata)
// pointers that must be relocated when the heap is written out. Only mirrors
// (java.lang.Class) and ResolvedMethodName instances carry such fields.
void HeapShared::mark_native_pointers(oop orig_obj) {
  if (java_lang_Class::is_instance(orig_obj)) {
    // A mirror points to its Klass* and (possibly) its array Klass*.
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
  } else if (java_lang_invoke_ResolvedMethodName::is_instance(orig_obj)) {
    // ResolvedMethodName.vmtarget points to a Method*.
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_invoke_ResolvedMethodName::vmtarget_offset());
  }
}
844
845 void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
846 CachedOopInfo* info = archived_object_cache()->get(src_obj);
847 assert(info != nullptr, "must be");
848 has_oop_pointers = info->has_oop_pointers();
849 has_native_pointers = info->has_native_pointers();
850 }
851
852 void HeapShared::set_has_native_pointers(oop src_obj) {
853 CachedOopInfo* info = archived_object_cache()->get(src_obj);
854 assert(info != nullptr, "must be");
855 info->set_has_native_pointers();
856 }
857
// Begin the scan that marks hidden classes reachable from the archivable
// static entry fields as "required" for the archive. Sets up the
// seen-objects table used for cycle/duplicate detection during the scan.
void HeapShared::start_finding_archivable_hidden_classes() {
  NoSafepointVerifier nsv;  // raw oops are held during the scan; GC must not move them

  init_seen_objects_table();

  find_archivable_hidden_classes_helper(archive_subgraph_entry_fields);
  if (CDSConfig::is_dumping_full_module_graph()) {
    // The full-module-graph entry fields are scanned only when that graph is dumped.
    find_archivable_hidden_classes_helper(fmg_archive_subgraph_entry_fields);
  }
}
868
// Tear down the seen-objects table set up by
// start_finding_archivable_hidden_classes().
void HeapShared::end_finding_archivable_hidden_classes() {
  NoSafepointVerifier nsv;

  delete_seen_objects_table();
}
874
// Walk the (sentinel-terminated) list of archivable static fields, grouped by
// declaring class, and scan the object graph hanging off each non-null field
// for hidden classes that must be included in the archive.
void HeapShared::find_archivable_hidden_classes_helper(ArchivableStaticFieldInfo fields[]) {
  if (!CDSConfig::is_dumping_heap()) {
    return;
  }
  // The outer loop does not increment i: the inner loop advances i over one
  // run of entries that share the same klass_name, then breaks.
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;  // start of the next class's group
      }

      // Read the static field's value out of the class's mirror and scan it.
      InstanceKlass* k = f->klass;
      oop m = k->java_mirror();
      oop o = m->obj_field(f->offset);
      if (o != nullptr) {
        find_archivable_hidden_classes_in_object(o);
      }
    }
  }
}
897
// Oop closure that performs an explicit, stack-based traversal of an object
// graph. Visited oops are pushed onto a GrowableArray and later retrieved
// via pop(), so deep graphs don't overflow the native stack.
class HeapShared::FindHiddenClassesOopClosure: public BasicOopIterateClosure {
  GrowableArray<oop> _stack;
  template <class T> void do_oop_work(T *p) {
    // Recurse on a GrowableArray to avoid overflowing the C stack.
    oop o = RawAccess<>::oop_load(p);
    if (o != nullptr) {
      _stack.append(o);
    }
  }

public:

  void do_oop(narrowOop *p) { FindHiddenClassesOopClosure::do_oop_work(p); }
  void do_oop( oop *p) { FindHiddenClassesOopClosure::do_oop_work(p); }

  // Seed the traversal with the root object.
  FindHiddenClassesOopClosure(oop o) {
    _stack.append(o);
  }
  // Returns the next pending oop, or nullptr when the traversal is done.
  oop pop() {
    if (_stack.length() == 0) {
      return nullptr;
    } else {
      return _stack.pop();
    }
  }
};
924
// Scan every object reachable from root. For each mirror or
// ResolvedMethodName found, mark the corresponding class as required so that
// it (possibly a hidden class) is kept in the archive. Uses the shared
// seen-objects table, so repeated calls don't re-scan common subgraphs.
void HeapShared::find_archivable_hidden_classes_in_object(oop root) {
  ResourceMark rm;  // FindHiddenClassesOopClosure's stack is resource-allocated
  FindHiddenClassesOopClosure c(root);
  oop o;
  while ((o = c.pop()) != nullptr) {
    if (!has_been_seen_during_subgraph_recording(o)) {
      set_has_been_seen_during_subgraph_recording(o);

      if (java_lang_Class::is_instance(o)) {
        // k is nullptr for primitive-type mirrors.
        Klass* k = java_lang_Class::as_Klass(o);
        if (k != nullptr && k->is_instance_klass()) {
          SystemDictionaryShared::mark_required_class(InstanceKlass::cast(k));
        }
      } else if (java_lang_invoke_ResolvedMethodName::is_instance(o)) {
        Method* m = java_lang_invoke_ResolvedMethodName::vmtarget(o);
        if (m != nullptr && m->method_holder() != nullptr) {
          SystemDictionaryShared::mark_required_class(m->method_holder());
        }
      }

      // Push this object's references for later processing.
      o->oop_iterate(&c);
    }
  }
}
949
// Top-level driver for archiving the Java heap at dump time: copies all
// archivable objects, verifies them, then hands the collected roots to
// ArchiveHeapWriter to produce the archived heap region in heap_info.
void HeapShared::archive_objects(ArchiveHeapInfo *heap_info) {
  {
    // Object copying/tracing holds raw oops; a safepoint (GC) would invalidate them.
    NoSafepointVerifier nsv;

    _default_subgraph_info = init_subgraph_info(vmClasses::Object_klass(), false);
    // _trace/_context record the path to the current object for error reporting.
    _trace = new GrowableArrayCHeap<oop, mtClassShared>(250);
    _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (UseCompressedOops || UseG1GC) {
      log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }
    copy_objects();

    if (!SkipArchiveHeapVerification) {
      CDSHeapVerifier::verify();
    }
    check_default_subgraph_classes();
  }

  // Resolve the pending root handles into raw oops for the writer.
  GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
  for (int i = 0; i < _pending_roots->length(); i++) {
    roots->append(_pending_roots->at(i).resolve());
  }
  ArchiveHeapWriter::write(roots, heap_info);
}
982
// Archive every interned string recorded in _dumped_interned_strings and pin
// each one against string deduplication, so its value array stays inside
// the archive.
void HeapShared::copy_interned_strings() {
  init_seen_objects_table();

  auto copier = [&] (oop s, bool value_ignored) {
    assert(s != nullptr, "sanity");
    assert(!ArchiveHeapWriter::is_string_too_large_to_archive(s), "large strings must have been filtered");
    bool success = archive_reachable_objects_from(1, _default_subgraph_info, s);
    assert(success, "must be");
    // Prevent string deduplication from changing the value field to
    // something not in the archive.
    java_lang_String::set_deduplication_forbidden(s);
  };
  _dumped_interned_strings->iterate_all(copier);

  delete_seen_objects_table();
}
999
1000 void HeapShared::copy_special_objects() {
// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
// The klass is recorded (in its buffered form) so it can be loaded/initialized
// before the subgraph is materialized at runtime. Klasses that are known to be
// initialized very early (String, Object, Object[], primitive arrays) are
// deliberately NOT recorded.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);

  // Lazily allocate the klass list on first use.
  if (_subgraph_object_klasses == nullptr) {
    _subgraph_object_klasses =
      new (mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_k), "must be a shared class");

  if (_k == buffered_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (buffered_k->is_instance_klass()) {
    if (CDSConfig::is_dumping_invokedynamic()) {
      assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class() ||
             HeapShared::is_lambda_proxy_klass(InstanceKlass::cast(buffered_k)),
            "we can archive only instances of boot classes or lambda proxy classes");
    } else {
      assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class(),
             "must be boot class");
    }
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
    check_allowed_klass(InstanceKlass::cast(orig_k));
  } else if (buffered_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(buffered_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
      // Check the element class of the ORIGINAL array klass (see comment above).
      check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
    }
    if (buffered_k == Universe::objectArrayKlass()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(buffered_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(buffered_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(buffered_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}
1128
// Enforce the policy on which classes may have instances inside an archived
// subgraph: normally only classes in java.base (plus, in non-product builds,
// test classes loaded by ArchiveHeapTestClass). Aborts the dump on violation.
void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
  if (CDSConfig::is_dumping_invokedynamic()) {
    // FIXME -- this allows LambdaProxy classes
    return;
  }
  if (ik->module()->name() == vmSymbols::java_base()) {
    assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
    return;
  }

#ifndef PRODUCT
  if (!ik->module()->is_named() && ik->package() == nullptr) {
    // This class is loaded by ArchiveHeapTestClass
    return;
  }
  const char* extra_msg = ", or in an unnamed package of an unnamed module";
#else
  const char* extra_msg = "";
#endif

  // Not an allowed class: report and abort the archive dump.
  ResourceMark rm;
  log_error(cds, heap)("Class %s not allowed in archive heap. Must be in java.base%s",
                       ik->external_name(), extra_msg);
  MetaspaceShared::unrecoverable_writing_error();
}
1211 _subgraph_object_klasses =
1212 ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
1213 for (int i = 0; i < num_subgraphs_klasses; i++) {
1214 Klass* subgraph_k = subgraph_object_klasses->at(i);
1215 if (log_is_enabled(Info, cds, heap)) {
1216 ResourceMark rm;
1217 log_info(cds, heap)(
1218 "Archived object klass %s (%2d) => %s",
1219 _k->external_name(), i, subgraph_k->external_name());
1220 }
1221 _subgraph_object_klasses->at_put(i, subgraph_k);
1222 ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
1223 }
1224 }
1225
1226 ArchivePtrMarker::mark_pointer(&_k);
1227 ArchivePtrMarker::mark_pointer(&_entry_field_records);
1228 ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
1229 }
1230
// Table-iteration helper: for each dump-time KlassSubGraphInfo that has any
// content, write an ArchivedKlassSubGraphInfoRecord into the RO region and
// add a (klass-hash -> record-offset) entry to the compact hashtable writer.
class HeapShared::CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
public:
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    // Skip infos with neither object klasses nor entry fields -- nothing to archive.
    if (info.subgraph_object_klasses() != nullptr || info.subgraph_entry_fields() != nullptr) {
      ArchivedKlassSubGraphInfoRecord* record = HeapShared::archive_subgraph_info(&info);
      // The hashtable is keyed by the buffered (archived) Klass address.
      Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
      u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};
1247
// Allocate an ArchivedKlassSubGraphInfoRecord in the RO region and populate
// it from the given dump-time info. The record for the default subgraph is
// also remembered in _runtime_default_subgraph_info so it can be serialized
// directly (it is looked up without the hashtable at runtime).
ArchivedKlassSubGraphInfoRecord* HeapShared::archive_subgraph_info(KlassSubGraphInfo* info) {
  ArchivedKlassSubGraphInfoRecord* record =
    (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
  record->init(info);
  if (info == _default_subgraph_info) {
    _runtime_default_subgraph_info = record;
  }
  return record;
}
1257
1258 // Build the records of archived subgraph infos, which include:
1259 // - Entry points to all subgraphs from the containing class mirror. The entry
1260 // points are static fields in the mirror. For each entry point, the field
1261 // offset, and value are recorded in the sub-graph
1262 // info. The value is stored back to the corresponding field at runtime.
1263 // - A list of klasses that need to be loaded/initialized before archived
1264 // java object sub-graph can be accessed at runtime.
1265 void HeapShared::write_subgraph_info_table() {
1266 // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
1267 DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
1268 CompactHashtableStats stats;
1269
1270 _run_time_subgraph_info_table.reset();
1271
1272 CompactHashtableWriter writer(d_table->_count, &stats);
1273 CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);
1275 writer.dump(&_run_time_subgraph_info_table, "subgraphs");
1276
1277 #ifndef PRODUCT
1294 _root_segments = new GrowableArrayCHeap<OopHandle, mtClassShared>(10);
1295 }
1296 _root_segments->push(OopHandle(Universe::vm_global(), segment_oop));
1297 }
1298
// Record the maximum number of elements in each heap-root segment, as
// determined by the archive writer/loader.
void HeapShared::init_root_segment_sizes(int max_size_elems) {
  _root_segment_max_size_elems = max_size_elems;
}
1302
// Serialize (at dump time) or deserialize (at runtime) the subgraph-info
// tables. The order of do_ptr/serialize calls must be identical for reading
// and writing, since SerializeClosure is a simple sequential stream.
void HeapShared::serialize_tables(SerializeClosure* soc) {

#ifndef PRODUCT
  soc->do_ptr(&_archived_ArchiveHeapTestClass);
  if (soc->reading() && _archived_ArchiveHeapTestClass != nullptr) {
    // Re-establish the test-class setup when an archive dumped with
    // ArchiveHeapTestClass is loaded.
    _test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
    setup_test_class(_test_class_name);
  }
#endif

  _run_time_subgraph_info_table.serialize_header(soc);
  soc->do_ptr(&_runtime_default_subgraph_info);
}
1316
// Optionally verify the whole Java heap before/after ("which") the archived
// static fields of klass k are installed. Controlled by -XX:VerifyArchivedFields:
// >0 runs a VM_Verify operation; >1 additionally forces a GC (once the VM is
// sufficiently initialized) to stress-test GC-safety of archived objects.
static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 force a GC to happen in such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC, true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}
1343
// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }

  if (!CDSConfig::is_using_aot_linked_classes()) {
    // Resolve (but do not initialize) the klasses of objects in the default subgraph.
    assert( _runtime_default_subgraph_info != nullptr, "must be");
    Array<Klass*>* klasses = _runtime_default_subgraph_info->subgraph_object_klasses();
    if (klasses != nullptr) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        ExceptionMark em(current); // no exception can happen here
        resolve_or_init(k, /*do_init*/false, current);
      }
    }
  }

  // Resolve klasses for the named subgraphs rooted at the registered static fields.
  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
1371
// For each registered entry field, look up the (already-loaded, shared boot)
// declaring class and resolve the klasses of its archived subgraph.
void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != nullptr && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}
1381
// Resolve (without initializing) all klasses of the archived subgraph rooted
// at k. Any pending exception is swallowed; if the subgraph record cannot be
// used, its archived roots are cleared so the Java code falls back to
// computing the values at runtime.
void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == nullptr) {
    clear_archived_roots_of(k);
  }
}
1394
// Eagerly resolve and initialize the java.lang.invoke implementation classes
// that archived method-handle/invokedynamic state depends on. Only needed
// when invokedynamic call sites are archived (loading) or being archived
// (dumping).
void HeapShared::initialize_java_lang_invoke(TRAPS) {
  if (CDSConfig::is_loading_invokedynamic() || CDSConfig::is_dumping_invokedynamic()) {
    resolve_or_init("java/lang/invoke/Invokers$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/MethodHandle", true, CHECK);
    resolve_or_init("java/lang/invoke/MethodHandleNatives", true, CHECK);
    resolve_or_init("java/lang/invoke/DirectMethodHandle$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/DelegatingMethodHandle$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/LambdaForm$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/BoundMethodHandle$Species_L", true, CHECK);
  }
}
1406
// Link (pass 0) and then initialize (pass 1) the already-loaded klasses of
// the default subgraph that belong to the given class loader. Two passes are
// used so that all relevant classes are linked before any static initializer
// runs. Classes not yet loaded by this loader are skipped; they are handled
// in a later phase.
void HeapShared::initialize_default_subgraph_classes(Handle class_loader, TRAPS) {
  if (!ArchiveHeapLoader::is_in_use()) {
    return;
  }

  assert( _runtime_default_subgraph_info != nullptr, "must be");
  Array<Klass*>* klasses = _runtime_default_subgraph_info->subgraph_object_klasses();
  if (klasses != nullptr) {
    for (int pass = 0; pass < 2; pass ++) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        if (k->class_loader_data() == nullptr) {
          // This class is not yet loaded. We will initialize it in a later phase.
          continue;
        }
        if (k->class_loader() == class_loader()) {
          if (pass == 0) {
            if (k->is_instance_klass()) {
              InstanceKlass::cast(k)->link_class(CHECK);
            }
          } else {
            resolve_or_init(k, /*do_init*/true, CHECK);
          }
        }
      }
    }
  }
}
1435
1436 void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
1437 JavaThread* THREAD = current;
1438 if (!ArchiveHeapLoader::is_in_use()) {
1439 return; // nothing to do
1440 }
1441
1442 if (k->name()->equals("jdk/internal/module/ArchivedModuleGraph") &&
1443 !CDSConfig::is_using_optimized_module_handling() &&
1444 // archive was created with --module-path
1445 ClassLoaderExt::num_module_paths() > 0) {
1446 // ArchivedModuleGraph was created with a --module-path that's different than the runtime --module-path.
1447 // Thus, it might contain references to modules that do not exist at runtime. We cannot use it.
1448 log_info(cds, heap)("Skip initializing ArchivedModuleGraph subgraph: is_using_optimized_module_handling=%s num_module_paths=%d",
1449 BOOL_TO_STR(CDSConfig::is_using_optimized_module_handling()), ClassLoaderExt::num_module_paths());
1450 return;
1451 }
1452
1453 ExceptionMark em(THREAD);
1454 const ArchivedKlassSubGraphInfoRecord* record =
1455 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
1519
1520 resolve_or_init(k, do_init, CHECK_NULL);
1521
1522 // Load/link/initialize the klasses of the objects in the subgraph.
1523 // nullptr class loader is used.
1524 Array<Klass*>* klasses = record->subgraph_object_klasses();
1525 if (klasses != nullptr) {
1526 for (int i = 0; i < klasses->length(); i++) {
1527 Klass* klass = klasses->at(i);
1528 if (!klass->is_shared()) {
1529 return nullptr;
1530 }
1531 resolve_or_init(klass, do_init, CHECK_NULL);
1532 }
1533 }
1534 }
1535
1536 return record;
1537 }
1538
// Convenience overload: look up a builtin (shared) class by name and resolve
// it; if do_init is true, also run its static initialization. A class that is
// not found in the shared dictionary is silently skipped.
void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
  TempNewSymbol klass_name_sym = SymbolTable::new_symbol(klass_name);
  InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
  if (k == nullptr) {
    return;
  }
  assert(k->is_shared_boot_class(), "sanity");
  // Always resolve first; initialization requires the class to be resolved.
  resolve_or_init(k, false, CHECK);
  if (do_init) {
    resolve_or_init(k, true, CHECK);
  }
}
1551
// do_init == false: make sure k is resolved (registered in the system
// dictionary); do_init == true: run static initialization of k (which must
// already be resolved). Type arrays need neither step.
void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    if (k->class_loader_data() == nullptr) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}
1569
1570 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1571 verify_the_heap(k, "before");
1679
1680 template <class T> void check(T *p) {
1681 _result |= (HeapAccess<>::oop_load(p) != nullptr);
1682 }
1683
1684 public:
1685 PointsToOopsChecker() : _result(false) {}
1686 void do_oop(narrowOop *p) { check(p); }
1687 void do_oop( oop *p) { check(p); }
1688 bool result() { return _result; }
1689 };
1690
// Build the CachedOopInfo for obj: record which object referenced it (taken
// from the currently-active walker, nullptr for a root) and whether obj
// itself contains any non-null oop fields.
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj) {
  WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
  oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(referrer, points_to_oops_checker.result());
}
1698
1699 // We currently allow only the box classes, as well as j.l.Object, which are
1700 // initialized very early by HeapShared::init_box_classes().
1701 bool HeapShared::can_mirror_be_used_in_subgraph(oop orig_java_mirror) {
1702 return java_lang_Class::is_primitive(orig_java_mirror)
1703 || orig_java_mirror == vmClasses::Boolean_klass()->java_mirror()
1704 || orig_java_mirror == vmClasses::Character_klass()->java_mirror()
1705 || orig_java_mirror == vmClasses::Float_klass()->java_mirror()
1706 || orig_java_mirror == vmClasses::Double_klass()->java_mirror()
1707 || orig_java_mirror == vmClasses::Byte_klass()->java_mirror()
1708 || orig_java_mirror == vmClasses::Short_klass()->java_mirror()
1709 || orig_java_mirror == vmClasses::Integer_klass()->java_mirror()
1710 || orig_java_mirror == vmClasses::Long_klass()->java_mirror()
1711 || orig_java_mirror == vmClasses::Void_klass()->java_mirror()
1712 || orig_java_mirror == vmClasses::Object_klass()->java_mirror();
1713 }
1714
1715 void HeapShared::init_box_classes(TRAPS) {
1716 if (ArchiveHeapLoader::is_in_use()) {
1717 vmClasses::Boolean_klass()->initialize(CHECK);
1718 vmClasses::Character_klass()->initialize(CHECK);
1719 vmClasses::Float_klass()->initialize(CHECK);
1720 vmClasses::Double_klass()->initialize(CHECK);
1721 vmClasses::Byte_klass()->initialize(CHECK);
1722 vmClasses::Short_klass()->initialize(CHECK);
1723 vmClasses::Integer_klass()->initialize(CHECK);
1724 vmClasses::Long_klass()->initialize(CHECK);
1725 vmClasses::Void_klass()->initialize(CHECK);
1726 }
1727 }
1728
// Report the current archiving context and reference trace (if any) at error
// level, then abort the archive dump. Does not return.
void HeapShared::exit_on_error() {
  if (_context != nullptr) {
    // Print the logical context strings pushed while archiving.
    ResourceMark rm;
    LogStream ls(Log(cds, heap)::error());
    ls.print_cr("Context");
    for (int i = 0; i < _context->length(); i++) {
      const char* s = _context->at(i);
      ls.print_cr("- %s", s);
    }
  }
  if (_trace != nullptr) {
    // Print the chain of objects being archived, outermost first.
    ResourceMark rm;
    LogStream ls(Log(cds, heap)::error());
    ls.print_cr("Reference trace");
    for (int i = 0; i < _trace->length(); i++) {
      oop orig_obj = _trace->at(i);
      ls.print_cr("[%d] ========================================", i);
      orig_obj->print_on(&ls);
      ls.cr();
    }
  }
  MetaspaceShared::unrecoverable_writing_error();
}
1752
1753 void HeapShared::debug_trace() {
1754 WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
1755 if (walker != nullptr) {
1756 LogStream ls(Log(cds, heap)::error());
1757 CDSHeapVerifier::trace_to_root(&ls, walker->referencing_obj());
1758 }
1759 }
1760
// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
// trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all orig_obj and all reachable objects.
// Returns false only when a level-1 (subgraph root) object cannot be
// archived; deeper failures abort the dump.
bool HeapShared::archive_reachable_objects_from(int level,
                                                KlassSubGraphInfo* subgraph_info,
                                                oop orig_obj) {
  ArchivingObjectMark mark(orig_obj);  // push orig_obj onto the error-reporting trace
  assert(orig_obj != nullptr, "must be");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
    debug_trace();
    exit_on_error();
  }

  if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
    ResourceMark rm;
    LogTarget(Debug, cds, heap) log;
    LogStream out(log);
    out.print("Found java mirror " PTR_FORMAT " ", p2i(orig_obj));
    Klass* k = java_lang_Class::as_Klass(orig_obj);
    if (k != nullptr) {
      out.print("%s", k->external_name());
    } else {
      out.print("primitive");
    }
    out.print_cr("; scratch mirror = " PTR_FORMAT,
                 p2i(scratch_java_mirror(orig_obj)));
  }

  // Mirrors are archived via their scratch copies, not the live mirror.
  if (java_lang_Class::is_instance(orig_obj)) {
    orig_obj = scratch_java_mirror(orig_obj);
    assert(orig_obj != nullptr, "must be archived");
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return true;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  // If the object was archived by an earlier subgraph, we only need to
  // record the klasses of it and its reachable objects for this subgraph.
  bool already_archived = has_been_archived(orig_obj);
  bool record_klasses_only = already_archived;
  if (!already_archived) {
    ++_num_new_archived_objs;
    if (!archive_object(orig_obj)) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return false;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        exit_on_error();
      }
    }
  }

  Klass *orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  // Recursively archive everything orig_obj references.
  WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj);
  orig_obj->oop_iterate(&walker);

  if (CDSConfig::is_initing_classes_at_dump_time()) {
    // The enum klasses are archived with preinitialized mirror.
    // See AOTClassInitializer::can_archive_initialized_mirror.
  } else {
    if (CDSEnumKlass::is_enum_obj(orig_obj)) {
      CDSEnumKlass::handle_enum_obj(level + 1, subgraph_info, orig_obj);
    }
  }

  return true;
}
1849
1850 //
1851 // Start from the given static field in a java mirror and archive the
1852 // complete sub-graph of java heap objects that are reached directly
1853 // or indirectly from the starting object by following references.
1854 // Sub-graph archiving restrictions (current):
1855 //
1856 // - All classes of objects in the archived sub-graph (including the
1857 // entry class) must be boot class only.
1858 // - No java.lang.Class instance (java mirror) can be included inside
1859 // an archived sub-graph. Mirror can only be the sub-graph entry object.
1860 //
1861 // The Java heap object sub-graph archiving process (see
1862 // WalkOopAndArchiveClosure):
1863 //
1864 // 1) Java object sub-graph archiving starts from a given static field
1865 // within a Class instance (java mirror). If the static field is a
1866 // reference field and points to a non-null java object, proceed to
1944 if (!CompressedOops::is_null(f)) {
1945 verify_subgraph_from(f);
1946 }
1947 }
1948
// Verify that every object reachable from orig_obj has been archived.
// An unarchived root is tolerated (see archive_reachable_objects_from(),
// which may legitimately skip an oversized level-1 root).
void HeapShared::verify_subgraph_from(oop orig_obj) {
  if (!has_been_archived(orig_obj)) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj);
  delete_seen_objects_table();
}
1961
// Recursively assert that obj and everything it references has been archived.
// Mirrors are checked through their scratch copies, mirroring the treatment
// in archive_reachable_objects_from().
void HeapShared::verify_reachable_objects_from(oop obj) {
  _num_total_verifications ++;
  if (java_lang_Class::is_instance(obj)) {
    obj = scratch_java_mirror(obj);
    assert(obj != nullptr, "must be");
  }
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);
    assert(has_been_archived(obj), "must be");
    VerifySharedOopClosure walker;
    obj->oop_iterate(&walker);
  }
}
1975 #endif
1976
// The "default subgraph" contains special objects (see heapShared.hpp) that
// can be accessed before we load any Java classes (including java/lang/Class).
// Make sure that these are only instances of the very few specific types
// that we can handle.
void HeapShared::check_default_subgraph_classes() {
  if (CDSConfig::is_initing_classes_at_dump_time()) {
    // With AOT class initialization the default subgraph may legitimately
    // contain other types; skip the check.
    return;
  }

  GrowableArray<Klass*>* klasses = _default_subgraph_info->subgraph_object_klasses();
  int num = klasses->length();
  for (int i = 0; i < num; i++) {
    Klass* subgraph_k = klasses->at(i);
    // The klasses are buffered copies; compare names via the source address.
    Symbol* name = ArchiveBuilder::current()->get_source_addr(subgraph_k->name());
    if (subgraph_k->is_instance_klass() &&
        name != vmSymbols::java_lang_Class() &&
        name != vmSymbols::java_lang_String() &&
        name != vmSymbols::java_lang_ArithmeticException() &&
        name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
        name != vmSymbols::java_lang_ArrayStoreException() &&
        name != vmSymbols::java_lang_ClassCastException() &&
        name != vmSymbols::java_lang_InternalError() &&
        name != vmSymbols::java_lang_NullPointerException()) {
      ResourceMark rm;
      fatal("default subgraph cannot have objects of type %s", subgraph_k->external_name());
    }
  }
}
2005
// Tracks which objects have been visited while recording/verifying one
// sub-graph; created by init_seen_objects_table() and freed afterwards.
HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
// Counters for the sub-graph currently being recorded (static storage, so
// implicitly zero-initialized). NOTE(review): presumably reset per subgraph
// by start_recording_subgraph() -- confirm against that function.
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

// Cumulative totals across all sub-graph recordings; reset at the top of
// archive_object_subgraphs() and reported there via log_info(cds, heap).
int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;
2016
2017 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
2018 return _seen_objects_table->get(obj) != nullptr;
2019 }
2020
2021 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
2022 assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
2205 return false;
2206 }
2207
2208 // See KlassSubGraphInfo::check_allowed_klass() - only two types of
2209 // classes are allowed:
2210 // (A) java.base classes (which must not be in the unnamed module)
2211 // (B) test classes which must be in the unnamed package of the unnamed module.
2212 // So if we see a '/' character in the class name, it must be in (A);
2213 // otherwise it must be in (B).
2214 if (name->index_of_at(0, "/", 1) >= 0) {
2215 return false; // (A)
2216 }
2217
2218 return true; // (B)
2219 }
2220 }
2221 }
2222
2223 return false;
2224 }
2225
2226 void HeapShared::initialize_test_class_from_archive(JavaThread* current) {
2227 Klass* k = _test_class;
2228 if (k != nullptr && ArchiveHeapLoader::is_in_use()) {
2229 JavaThread* THREAD = current;
2230 ExceptionMark em(THREAD);
2231 const ArchivedKlassSubGraphInfoRecord* record =
2232 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
2233
2234 // The _test_class is in the unnamed module, so it can't call CDS.initializeFromArchive()
2235 // from its <clinit> method. So we set up its "archivedObjects" field first, before
2236 // calling its <clinit>. This is not strictly clean, but it's a convenient way to write unit
2237 // test cases (see test/hotspot/jtreg/runtime/cds/appcds/cacheObject/ArchiveHeapTestClass.java).
2238 if (record != nullptr) {
2239 init_archived_fields_for(k, record);
2240 }
2241 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
2242 }
2243 }
2244 #endif
2245
2246 void HeapShared::init_for_dumping(TRAPS) {
2247 if (HeapShared::can_write()) {
2248 setup_test_class(ArchiveHeapTestClass);
2249 _dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
2250 init_subgraph_entry_fields(CHECK);
2251 }
2252 }
2253
// Archive the sub-graphs reachable from the static fields listed in fields[]
// (the array is terminated by an entry whose valid() returns false).
// Consecutive entries that belong to the same class are recorded in a single
// {start_recording_subgraph ... done_recording_subgraph} pass. In non-PRODUCT
// builds, a final pass verifies that every reachable object was archived.
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  // Reset the cumulative statistics reported at the end of this function.
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the class of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  //
  // Note: the outer loop deliberately has no increment; the inner loop
  // advances i past the whole run of entries that share klass_name.
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    ContextMark cm(klass_name);
    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      // NOTE(review): this is a pointer comparison, not strcmp -- it assumes
      // entries for the same class reference the same klass_name string.
      if (f->klass_name != klass_name) {
        break;
      }

      // Intentionally shadows the outer ContextMark: nests the field name
      // inside the class-name context for the duration of this field.
      ContextMark cm(f->field_name);
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records = %d",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  // Debug-only pass: re-walk each entry field and assert that everything
  // reachable from it was archived (see verify_subgraph_from_static_field).
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}
|