6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotArtifactFinder.hpp"
26 #include "cds/aotClassInitializer.hpp"
27 #include "cds/aotClassLocation.hpp"
28 #include "cds/aotLogging.hpp"
29 #include "cds/aotMetaspace.hpp"
30 #include "cds/aotReferenceObjSupport.hpp"
31 #include "cds/archiveBuilder.hpp"
32 #include "cds/archiveHeapLoader.hpp"
33 #include "cds/archiveHeapWriter.hpp"
34 #include "cds/archiveUtils.hpp"
35 #include "cds/cdsConfig.hpp"
36 #include "cds/cdsEnumKlass.hpp"
37 #include "cds/cdsHeapVerifier.hpp"
38 #include "cds/heapShared.hpp"
39 #include "cds/regeneratedClasses.hpp"
40 #include "classfile/classLoaderData.hpp"
41 #include "classfile/javaClasses.inline.hpp"
42 #include "classfile/modules.hpp"
43 #include "classfile/stringTable.hpp"
44 #include "classfile/symbolTable.hpp"
45 #include "classfile/systemDictionary.hpp"
46 #include "classfile/systemDictionaryShared.hpp"
47 #include "classfile/vmClasses.hpp"
48 #include "classfile/vmSymbols.hpp"
49 #include "gc/shared/collectedHeap.hpp"
50 #include "gc/shared/gcLocker.hpp"
51 #include "gc/shared/gcVMOperations.hpp"
52 #include "logging/log.hpp"
53 #include "logging/logStream.hpp"
54 #include "memory/iterator.inline.hpp"
55 #include "memory/resourceArea.hpp"
56 #include "memory/universe.hpp"
57 #include "oops/compressedOops.inline.hpp"
58 #include "oops/fieldStreams.inline.hpp"
59 #include "oops/objArrayOop.inline.hpp"
60 #include "oops/oop.inline.hpp"
61 #include "oops/typeArrayOop.inline.hpp"
62 #include "prims/jvmtiExport.hpp"
63 #include "runtime/arguments.hpp"
64 #include "runtime/fieldDescriptor.inline.hpp"
65 #include "runtime/init.hpp"
66 #include "runtime/javaCalls.hpp"
67 #include "runtime/mutexLocker.hpp"
68 #include "runtime/safepointVerifiers.hpp"
69 #include "utilities/bitMap.inline.hpp"
70 #include "utilities/copy.hpp"
71 #if INCLUDE_G1GC
72 #include "gc/g1/g1CollectedHeap.hpp"
73 #endif
74
75 #if INCLUDE_CDS_JAVA_HEAP
76
77 struct ArchivableStaticFieldInfo {
78 const char* klass_name;
79 const char* field_name;
80 InstanceKlass* klass;
81 int offset;
82 BasicType type;
83
84 ArchivableStaticFieldInfo(const char* k, const char* f)
85 : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}
86
87 bool valid() {
88 return klass_name != nullptr;
89 }
90 };
91
92 DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
93
94 size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
95 size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
96 size_t HeapShared::_total_obj_count;
97 size_t HeapShared::_total_obj_size;
98
99 #ifndef PRODUCT
100 #define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
101 static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
102 static const char* _test_class_name = nullptr;
103 static Klass* _test_class = nullptr;
104 static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
105 #endif
106
107
108 //
109 // If you add new entries to the following tables, you should know what you're doing!
110 //
111
112 static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
113 {"java/lang/Integer$IntegerCache", "archivedCache"},
114 {"java/lang/Long$LongCache", "archivedCache"},
115 {"java/lang/Byte$ByteCache", "archivedCache"},
116 {"java/lang/Short$ShortCache", "archivedCache"},
117 {"java/lang/Character$CharacterCache", "archivedCache"},
118 {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
119 {"sun/util/locale/BaseLocale", "constantBaseLocales"},
120 {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
121 {"java/util/ImmutableCollections", "archivedObjects"},
122 {"java/lang/ModuleLayer", "EMPTY_LAYER"},
123 {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
124 {"jdk/internal/math/FDBigInteger", "archivedCaches"},
125
126 #ifndef PRODUCT
127 {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
128 #endif
129 {nullptr, nullptr},
130 };
131
132 // full module graph
133 static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
134 {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
135 {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
136 {"java/lang/Module$ArchivedData", "archivedData"},
137 {nullptr, nullptr},
138 };
139
140 KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
141 ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
142 GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
143 GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
144 int HeapShared::_root_segment_max_size_elems;
145 OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
146 MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
147
148 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
149 for (int i = 0; fields[i].valid(); i++) {
150 if (fields[i].klass == ik) {
151 return true;
152 }
153 }
154 return false;
155 }
156
157 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
158 return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
159 is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
160 }
161
162 unsigned HeapShared::oop_hash(oop const& p) {
163 // Do not call p->identity_hash() as that will update the
164 // object header.
165 return primitive_hash(cast_from_oop<intptr_t>(p));
166 }
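// The hash above is derived from the oop's current address, so it is stable
// only as long as GC does not move the object; the cache must therefore not be
// used across points where objects can move.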
167
168 static void reset_states(oop obj, TRAPS) {
169 Handle h_obj(THREAD, obj);
170 InstanceKlass* klass = InstanceKlass::cast(obj->klass());
171 TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
172 Symbol* method_sig = vmSymbols::void_method_signature();
173
174 while (klass != nullptr) {
175 Method* method = klass->find_method(method_name, method_sig);
176 if (method != nullptr) {
177 assert(method->is_private(), "must be");
178 if (log_is_enabled(Debug, aot)) {
179 ResourceMark rm(THREAD);
180 log_debug(aot)(" calling %s", method->name_and_sig_as_C_string());
181 }
182 JavaValue result(T_VOID);
183 JavaCalls::call_special(&result, h_obj, klass,
184 method_name, method_sig, CHECK);
185 }
186 klass = klass->super();
187 }
199 // to keep track of resources, etc., loaded by the null class loader.
200 //
201 // Note, this object is non-null, and is not the same as
202 // ClassLoaderData::the_null_class_loader_data()->class_loader(),
203 // which is null.
204 log_debug(aot)("Resetting boot loader");
205 JavaValue result(T_OBJECT);
206 JavaCalls::call_static(&result,
207 vmClasses::jdk_internal_loader_ClassLoaders_klass(),
208 vmSymbols::bootLoader_name(),
209 vmSymbols::void_BuiltinClassLoader_signature(),
210 CHECK);
211 Handle boot_loader(THREAD, result.get_oop());
212 reset_states(boot_loader(), CHECK);
213 }
214
215 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
216
217 bool HeapShared::has_been_archived(oop obj) {
218 assert(CDSConfig::is_dumping_heap(), "dump-time only");
219 return archived_object_cache()->get(obj) != nullptr;
220 }
221
222 int HeapShared::append_root(oop obj) {
223 assert(CDSConfig::is_dumping_heap(), "dump-time only");
224 if (obj != nullptr) {
225 assert(has_been_archived(obj), "must be");
226 }
227 // No GC should happen since we aren't scanning _pending_roots.
228 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
229
230 return _pending_roots->append(obj);
231 }
232
233 objArrayOop HeapShared::root_segment(int segment_idx) {
234 if (CDSConfig::is_dumping_heap()) {
235 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
236 } else {
237 assert(CDSConfig::is_using_archive(), "must be");
238 }
239
240 objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
241 assert(segment != nullptr, "should have been initialized");
242 return segment;
243 }
244
245 void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
246 assert(_root_segment_max_size_elems > 0, "sanity");
247
248 // Try to avoid divisions for the common case.
249 if (idx < _root_segment_max_size_elems) {
250 seg_idx = 0;
251 int_idx = idx;
252 } else {
253 seg_idx = idx / _root_segment_max_size_elems;
254 int_idx = idx % _root_segment_max_size_elems;
255 }
256
257 assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
258 "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
259 }
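// Worked example (illustrative): with _root_segment_max_size_elems == 1000,
// idx 2500 yields seg_idx == 2 and int_idx == 500, since 2500 == 2 * 1000 + 500;
// idx 999 takes the division-free path and yields seg_idx == 0, int_idx == 999.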
260
261 // Returns the archived object root at the given index (optionally clearing the slot)
262 oop HeapShared::get_root(int index, bool clear) {
263 assert(index >= 0, "sanity");
264 assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
286 }
287 }
288
289 bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info) {
290 assert(CDSConfig::is_dumping_heap(), "dump-time only");
291
292 assert(!obj->is_stackChunk(), "do not archive stack chunks");
293 if (has_been_archived(obj)) {
294 return true;
295 }
296
297 if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
298 log_debug(aot, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: %zu",
299 p2i(obj), obj->size());
300 debug_trace();
301 return false;
302 } else {
303 count_allocation(obj->size());
304 ArchiveHeapWriter::add_source_obj(obj);
305 CachedOopInfo info = make_cached_oop_info(obj, referrer);
306 archived_object_cache()->put_when_absent(obj, info);
307 archived_object_cache()->maybe_grow();
308 mark_native_pointers(obj);
309
310 Klass* k = obj->klass();
311 if (k->is_instance_klass()) {
312 // Whenever we see a non-array Java object of type X, we mark X to be aot-initialized.
313 // This ensures that during the production run, whenever Java code sees a cached object
314 // of type X, we know that X is already initialized. (see TODO comment below ...)
315
316 if (InstanceKlass::cast(k)->is_enum_subclass()
317 // We can't rerun <clinit> of enum classes (see cdsEnumKlass.cpp) so
318 // we must store them as AOT-initialized.
319 || (subgraph_info == _dump_time_special_subgraph))
320 // TODO: we do this only for the special subgraph for now. Extending this to
321 // other subgraphs would require more refactoring of the core library (such as
322 // move some initialization logic into runtimeSetup()).
323 //
324 // For the other subgraphs, we have a weaker mechanism to ensure that
325 // all classes in a subgraph are initialized before the subgraph is programmatically
326 // returned from jdk.internal.misc.CDS::initializeFromArchive().
392 OopHandle* handle = get(ptr);
393 if (handle != nullptr) {
394 handle->release(Universe::vm_global());
395 remove(ptr);
396 }
397 }
398 };
399
400 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
401 if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
402 _scratch_objects_table->set_oop(src, dest);
403 }
404 }
405
406 objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
407 return (objArrayOop)_scratch_objects_table->get_oop(src);
408 }
409
410 void HeapShared::init_dumping() {
411 _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
412 _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
413 }
414
415 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
416 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
417 BasicType bt = (BasicType)i;
418 if (!is_reference_type(bt)) {
419 oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
420 _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
421 }
422 }
423 }
424
425 // Given java_mirror that represents a (primitive or reference) type T,
426 // return the "scratch" version that represents the same type T.
427 // Note that java_mirror itself will be returned if it's already a
428 // scratch mirror.
429 //
430 // See java_lang_Class::create_scratch_mirror() for more info.
431 oop HeapShared::scratch_java_mirror(oop java_mirror) {
432 assert(java_lang_Class::is_instance(java_mirror), "must be");
433
434 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
435 BasicType bt = (BasicType)i;
436 if (!is_reference_type(bt)) {
437 if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
438 return java_mirror;
439 }
440 }
441 }
442
443 if (java_lang_Class::is_primitive(java_mirror)) {
444 return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
445 } else {
446 return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
447 }
448 }
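// For example, passing the mirror for int returns the scratch int mirror from
// _scratch_basic_type_mirrors, while passing the String.class mirror returns the
// scratch mirror recorded for the String InstanceKlass in _scratch_objects_table.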
449
450 oop HeapShared::scratch_java_mirror(BasicType t) {
451 assert((uint)t < T_VOID+1, "range check");
452 assert(!is_reference_type(t), "sanity");
453 return _scratch_basic_type_mirrors[t].resolve();
454 }
455
456 oop HeapShared::scratch_java_mirror(Klass* k) {
457 return _scratch_objects_table->get_oop(k);
458 }
459
460 void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
461 _scratch_objects_table->set_oop(k, mirror);
462 }
463
464 void HeapShared::remove_scratch_objects(Klass* k) {
465 // The Klass is being deallocated. Its Java mirror can still be alive, and it should not
466 // point to a dead Klass. We need to break the link from the mirror to the Klass.
467 // See how InstanceKlass::deallocate_contents does it for normal mirrors.
468 oop mirror = _scratch_objects_table->get_oop(k);
469 if (mirror != nullptr) {
470 java_lang_Class::set_klass(mirror, nullptr);
471 }
472 _scratch_objects_table->remove_oop(k);
473 if (k->is_instance_klass()) {
474 _scratch_objects_table->remove(InstanceKlass::cast(k)->constants());
475 }
476 }
477
478 //TODO: we eventually want a more direct test for these kinds of things.
479 //For example the JVM could record some bit of context from the creation
480 //of the klass, such as who called the hidden class factory. Using
481 //string compares on names is fragile and will break as soon as somebody
482 //changes the names in the JDK code. See discussion in JDK-8342481 for
483 //related ideas about marking AOT-related classes.
484 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
485 return ik->is_hidden() &&
486 (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
487 ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
488 ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
489 ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
490 }
491
492 bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
493 return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
494 }
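// For example (hypothetical name), a hidden class named
// "com/foo/Bar$$Lambda+0x800000001" matches: "$$Lambda+" occurs at an index > 0.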
495
510 if (RegeneratedClasses::is_regenerated_object(ik)) {
511 InstanceKlass* orig_ik = RegeneratedClasses::get_original_object(ik);
512 precond(orig_ik->is_initialized());
513 orig_mirror = orig_ik->java_mirror();
514 } else {
515 precond(ik->is_initialized());
516 orig_mirror = ik->java_mirror();
517 }
518
519 oop m = scratch_java_mirror(ik);
520 int nfields = 0;
521 for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
522 if (fs.access_flags().is_static()) {
523 fieldDescriptor& fd = fs.field_descriptor();
524 int offset = fd.offset();
525 switch (fd.field_type()) {
526 case T_OBJECT:
527 case T_ARRAY:
528 {
529 oop field_obj = orig_mirror->obj_field(offset);
530 if (offset == java_lang_Class::reflection_data_offset()) {
531 // Class::reflectionData uses a SoftReference, which cannot be archived. Set it
532 // to null; it will be recreated at runtime.
533 field_obj = nullptr;
534 }
535 m->obj_field_put(offset, field_obj);
536 if (field_obj != nullptr) {
537 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, field_obj);
538 assert(success, "sanity");
539 }
540 }
541 break;
542 case T_BOOLEAN:
543 m->bool_field_put(offset, orig_mirror->bool_field(offset));
544 break;
545 case T_BYTE:
546 m->byte_field_put(offset, orig_mirror->byte_field(offset));
547 break;
548 case T_SHORT:
549 m->short_field_put(offset, orig_mirror->short_field(offset));
550 break;
551 case T_CHAR:
552 m->char_field_put(offset, orig_mirror->char_field(offset));
553 break;
554 case T_INT:
568 }
569 nfields ++;
570 }
571 }
572
573 oop class_data = java_lang_Class::class_data(orig_mirror);
574 java_lang_Class::set_class_data(m, class_data);
575 if (class_data != nullptr) {
576 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, class_data);
577 assert(success, "sanity");
578 }
579
580 if (log_is_enabled(Debug, aot, init)) {
581 ResourceMark rm;
582 log_debug(aot, init)("copied %3d field(s) in aot-initialized mirror %s%s%s", nfields, ik->external_name(),
583 ik->is_hidden() ? " (hidden)" : "",
584 ik->is_enum_subclass() ? " (enum)" : "");
585 }
586 }
587
588 static void copy_java_mirror_hashcode(oop orig_mirror, oop scratch_m) {
589 // We need to retain the identity_hash, because it may have been used by some hashtables
590 // in the shared heap.
591 if (!orig_mirror->fast_no_hash_check()) {
592 intptr_t src_hash = orig_mirror->identity_hash();
593 if (UseCompactObjectHeaders) {
594 narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
595 scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
596 } else {
597 scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
598 }
599 assert(scratch_m->mark().is_unlocked(), "sanity");
600
601 DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
602 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
603 }
604 }
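// Example (a sketch, assuming an archived table keyed by identity hash): if a
// java.util.IdentityHashMap in the shared heap used this mirror as a key, its
// slot position was derived from the dump-time identity hash, so the archived
// mirror must report the same value at runtime.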
605
606 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
607 if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
608 objArrayOop rr = src_ik->constants()->resolved_references_or_null();
609 if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
610 return HeapShared::scratch_resolved_references(src_ik->constants());
611 }
612 }
613 return nullptr;
614 }
615
616 void HeapShared::archive_strings() {
617 oop shared_strings_array = StringTable::init_shared_strings_array();
618 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
619 assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
620 StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
621 }
622
623 int HeapShared::archive_exception_instance(oop exception) {
624 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
625 assert(success, "sanity");
626 return append_root(exception);
627 }
628
629 void HeapShared::mark_native_pointers(oop orig_obj) {
630 if (java_lang_Class::is_instance(orig_obj)) {
631 ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
632 ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
633 } else if (java_lang_invoke_ResolvedMethodName::is_instance(orig_obj)) {
634 ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_invoke_ResolvedMethodName::vmtarget_offset());
635 }
636 }
637
638 void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
639 CachedOopInfo* info = archived_object_cache()->get(src_obj);
640 assert(info != nullptr, "must be");
641 has_oop_pointers = info->has_oop_pointers();
642 has_native_pointers = info->has_native_pointers();
643 }
644
645 void HeapShared::set_has_native_pointers(oop src_obj) {
646 CachedOopInfo* info = archived_object_cache()->get(src_obj);
647 assert(info != nullptr, "must be");
648 info->set_has_native_pointers();
649 }
650
651 // Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
652 // should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
653 void HeapShared::start_scanning_for_oops() {
654 {
655 NoSafepointVerifier nsv;
656
657 // The special subgraph doesn't belong to any class. We use Object_klass() here just
658 // for convenience.
659 _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
660
661 // Cache for recording where the archived objects are copied to
662 create_archived_object_cache();
663
664 if (UseCompressedOops || UseG1GC) {
665 aot_log_info(aot)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
666 UseCompressedOops ? p2i(CompressedOops::begin()) :
667 p2i((address)G1CollectedHeap::heap()->reserved().start()),
668 UseCompressedOops ? p2i(CompressedOops::end()) :
669 p2i((address)G1CollectedHeap::heap()->reserved().end()));
670 }
671
672 archive_subgraphs();
673 }
674
675 init_seen_objects_table();
676 Universe::archive_exception_instances();
677 }
678
679 void HeapShared::end_scanning_for_oops() {
680 archive_strings();
681 delete_seen_objects_table();
682 }
683
684 void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
685 {
686 NoSafepointVerifier nsv;
687 CDSHeapVerifier::verify();
688 check_special_subgraph_classes();
689 }
690
691 StringTable::write_shared_table();
692 ArchiveHeapWriter::write(_pending_roots, heap_info);
693
694 ArchiveBuilder::OtherROAllocMark mark;
695 write_subgraph_info_table();
696 }
697
698 void HeapShared::scan_java_mirror(oop orig_mirror) {
699 oop m = scratch_java_mirror(orig_mirror);
700 if (m != nullptr) { // m is nullptr for classes of custom class loaders
701 copy_java_mirror_hashcode(orig_mirror, m);
702 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
703 assert(success, "sanity");
704 }
705 }
706
707 void HeapShared::scan_java_class(Klass* orig_k) {
708 scan_java_mirror(orig_k->java_mirror());
709
710 if (orig_k->is_instance_klass()) {
711 InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
712 orig_ik->constants()->prepare_resolved_references_for_archiving();
713 objArrayOop rr = get_archived_resolved_references(orig_ik);
714 if (rr != nullptr) {
715 bool success = HeapShared::archive_reachable_objects_from(1, _dump_time_special_subgraph, rr);
716 assert(success, "must be");
717 }
718 }
719 }
720
721 void HeapShared::archive_subgraphs() {
722 assert(CDSConfig::is_dumping_heap(), "must be");
723
1078 which, k->external_name());
1079 FlagSetting fs1(VerifyBeforeGC, true);
1080 FlagSetting fs2(VerifyDuringGC, true);
1081 FlagSetting fs3(VerifyAfterGC, true);
1082 Universe::heap()->collect(GCCause::_java_lang_system_gc);
1083 }
1084 }
1085 }
1086
1087 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
1088 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
1089 //
1090 // Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
1091 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
1092 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
1093 void HeapShared::resolve_classes(JavaThread* current) {
1094 assert(CDSConfig::is_using_archive(), "runtime only!");
1095 if (!ArchiveHeapLoader::is_in_use()) {
1096 return; // nothing to do
1097 }
1098 resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
1099 resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
1100 }
1101
1102 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1103 for (int i = 0; fields[i].valid(); i++) {
1104 ArchivableStaticFieldInfo* info = &fields[i];
1105 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1106 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1107 assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
1108 resolve_classes_for_subgraph_of(current, k);
1109 }
1110 }
1111
1112 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1113 JavaThread* THREAD = current;
1114 ExceptionMark em(THREAD);
1115 const ArchivedKlassSubGraphInfoRecord* record =
1116 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1117 if (HAS_PENDING_EXCEPTION) {
1436 };
1437
1438 // Checks if an oop has any non-null oop fields
1439 class PointsToOopsChecker : public BasicOopIterateClosure {
1440 bool _result;
1441
1442 template <class T> void check(T *p) {
1443 _result |= (HeapAccess<>::oop_load(p) != nullptr);
1444 }
1445
1446 public:
1447 PointsToOopsChecker() : _result(false) {}
1448 void do_oop(narrowOop *p) { check(p); }
1449 void do_oop( oop *p) { check(p); }
1450 bool result() { return _result; }
1451 };
1452
1453 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
1454 PointsToOopsChecker points_to_oops_checker;
1455 obj->oop_iterate(&points_to_oops_checker);
1456 return CachedOopInfo(referrer, points_to_oops_checker.result());
1457 }
1458
1459 void HeapShared::init_box_classes(TRAPS) {
1460 if (ArchiveHeapLoader::is_in_use()) {
1461 vmClasses::Boolean_klass()->initialize(CHECK);
1462 vmClasses::Character_klass()->initialize(CHECK);
1463 vmClasses::Float_klass()->initialize(CHECK);
1464 vmClasses::Double_klass()->initialize(CHECK);
1465 vmClasses::Byte_klass()->initialize(CHECK);
1466 vmClasses::Short_klass()->initialize(CHECK);
1467 vmClasses::Integer_klass()->initialize(CHECK);
1468 vmClasses::Long_klass()->initialize(CHECK);
1469 vmClasses::Void_klass()->initialize(CHECK);
1470 }
1471 }
1472
1473 // (1) If orig_obj has not been archived yet, archive it.
1474 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
1475 // trace all objects that are reachable from it, and make sure these objects are archived.
1476 // (3) Record the klasses of all objects that are reachable from orig_obj (including those that
1477 // were already archived when this function is called)
1478 bool HeapShared::archive_reachable_objects_from(int level,
1479 KlassSubGraphInfo* subgraph_info,
1480 oop orig_obj) {
1481 assert(orig_obj != nullptr, "must be");
1482 PendingOopStack stack;
1483 stack.push(PendingOop(orig_obj, nullptr, level));
1484
1485 while (stack.length() > 0) {
1486 PendingOop po = stack.pop();
1487 _object_being_archived = po;
1488 bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
1489 _object_being_archived = PendingOop();
1490
1491 if (!status) {
1492 // Don't archive a subgraph root that's too big. For archived static fields, that's OK
1717 verify_subgraph_from(f);
1718 }
1719 }
1720
1721 void HeapShared::verify_subgraph_from(oop orig_obj) {
1722 if (!has_been_archived(orig_obj)) {
1723 // It's OK for the root of a subgraph not to be archived. See comments in
1724 // archive_reachable_objects_from().
1725 return;
1726 }
1727
1728 // Verify that all objects reachable from orig_obj are archived.
1729 init_seen_objects_table();
1730 verify_reachable_objects_from(orig_obj);
1731 delete_seen_objects_table();
1732 }
1733
1734 void HeapShared::verify_reachable_objects_from(oop obj) {
1735 _num_total_verifications ++;
1736 if (java_lang_Class::is_instance(obj)) {
1737 obj = scratch_java_mirror(obj);
1738 assert(obj != nullptr, "must be");
1739 }
1740 if (!has_been_seen_during_subgraph_recording(obj)) {
1741 set_has_been_seen_during_subgraph_recording(obj);
1742 assert(has_been_archived(obj), "must be");
1743 VerifySharedOopClosure walker;
1744 obj->oop_iterate(&walker);
1745 }
1746 }
1747 #endif
1748
1749 void HeapShared::check_special_subgraph_classes() {
1750 if (CDSConfig::is_initing_classes_at_dump_time()) {
1751 // We can have aot-initialized classes (such as Enums) that can reference objects
1752 // of arbitrary types. Currently, we trust the JEP 483 implementation to only
1753 // aot-initialize classes that are "safe".
1754 //
1755 // TODO: we need an automatic tool that checks the safety of aot-initialized
1756 // classes (when we extend the set of aot-initialized classes beyond JEP 483)
2029
2030 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2031 bool is_full_module_graph) {
2032 _num_total_subgraph_recordings = 0;
2033 _num_total_walked_objs = 0;
2034 _num_total_archived_objs = 0;
2035 _num_total_recorded_klasses = 0;
2036 _num_total_verifications = 0;
2037
2038 // For each class X that has one or more archived fields:
2039 // [1] Dump the subgraph of each archived field
2040 //     [2] Create a list of all the classes of the objects that can be reached
2041 // by any of these static fields.
2042 // At runtime, these classes are initialized before X's archived fields
2043 // are restored by HeapShared::initialize_from_archived_subgraph().
2044 for (int i = 0; fields[i].valid(); ) {
2045 ArchivableStaticFieldInfo* info = &fields[i];
2046 const char* klass_name = info->klass_name;
2047 start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2048
2049 // If you have specified consecutive fields of the same klass in
2050 // fields[], these will be archived in the same
2051 // {start_recording_subgraph ... done_recording_subgraph} pass to
2052 // save time.
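    // For example (hypothetical entries), {"com/foo/X", "f1"} followed by
    // {"com/foo/X", "f2"} would both be archived within a single
    // start_recording_subgraph()/done_recording_subgraph() pair.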
2053 for (; fields[i].valid(); i++) {
2054 ArchivableStaticFieldInfo* f = &fields[i];
2055 if (f->klass_name != klass_name) {
2056 break;
2057 }
2058
2059 archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2060 f->offset, f->field_name);
2061 }
2062 done_recording_subgraph(info->klass, klass_name);
2063 }
2064
2065 log_info(aot, heap)("Archived subgraph records = %d",
2066 _num_total_subgraph_recordings);
2067 log_info(aot, heap)(" Walked %d objects", _num_total_walked_objs);
2068 log_info(aot, heap)(" Archived %d objects", _num_total_archived_objs);
2069 log_info(aot, heap)(" Recorded %d klasses", _num_total_recorded_klasses);
2070
2071 #ifndef PRODUCT
2072 for (int i = 0; fields[i].valid(); i++) {
2073 ArchivableStaticFieldInfo* f = &fields[i];
2074 verify_subgraph_from_static_field(f->klass, f->offset);
2075 }
2076 log_info(aot, heap)(" Verified %d references", _num_total_verifications);
2077 #endif
2078 }
2079
2080 // Keep track of the contents of the archived interned string table. This table
2081 // is used only by CDSHeapVerifier.
2082 void HeapShared::add_to_dumped_interned_strings(oop string) {
2083 assert_at_safepoint(); // DumpedInternedStrings uses raw oops
2084 assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
2085 bool created;
2086 _dumped_interned_strings->put_if_absent(string, true, &created);
2087 if (created) {
2088 // Prevent string deduplication from changing the value field to
2089 // something not in the archive.
2090 java_lang_String::set_deduplication_forbidden(string);
2091 _dumped_interned_strings->maybe_grow();
2092 }
2093 }
2094
2095 bool HeapShared::is_dumped_interned_string(oop o) {
2096 return _dumped_interned_strings->get(o) != nullptr;
2097 }
2098
2099 void HeapShared::debug_trace() {
2100 ResourceMark rm;
2101 oop referrer = _object_being_archived.referrer();
2102 if (referrer != nullptr) {
2103 LogStream ls(Log(aot, heap)::error());
2104 ls.print_cr("Reference trace");
2105 CDSHeapVerifier::trace_to_root(&ls, referrer);
2106 }
2107 }
2108
2109 #ifndef PRODUCT
2110 // At dump-time, find the location of all the non-null oop pointers in an archived heap
2111 // region. This way we can quickly relocate all the pointers without using
2112 // BasicOopIterateClosure at runtime.
2113 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
2114 void* _start;
2115 BitMap *_oopmap;
2116 int _num_total_oops;
2117 int _num_null_oops;
2118 public:
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "cds/aotArtifactFinder.hpp"
26 #include "cds/aotCacheAccess.hpp"
27 #include "cds/aotClassInitializer.hpp"
28 #include "cds/aotClassLocation.hpp"
29 #include "cds/aotConstantPoolResolver.hpp"
30 #include "cds/aotLogging.hpp"
31 #include "cds/aotMetaspace.hpp"
32 #include "cds/aotReferenceObjSupport.hpp"
33 #include "cds/archiveBuilder.hpp"
34 #include "cds/archiveHeapLoader.hpp"
35 #include "cds/archiveHeapWriter.hpp"
36 #include "cds/archiveUtils.hpp"
37 #include "cds/cdsConfig.hpp"
38 #include "cds/cdsEnumKlass.hpp"
39 #include "cds/cdsHeapVerifier.hpp"
40 #include "cds/heapShared.hpp"
41 #include "cds/regeneratedClasses.hpp"
42 #include "classfile/classLoaderData.hpp"
43 #include "classfile/javaClasses.inline.hpp"
44 #include "classfile/modules.hpp"
45 #include "classfile/stringTable.hpp"
46 #include "classfile/symbolTable.hpp"
47 #include "classfile/systemDictionary.hpp"
48 #include "classfile/systemDictionaryShared.hpp"
49 #include "classfile/vmClasses.hpp"
50 #include "classfile/vmSymbols.hpp"
51 #include "gc/shared/collectedHeap.hpp"
52 #include "gc/shared/gcLocker.hpp"
53 #include "gc/shared/gcVMOperations.hpp"
54 #include "logging/log.hpp"
55 #include "logging/logStream.hpp"
56 #include "memory/iterator.inline.hpp"
57 #include "memory/resourceArea.hpp"
58 #include "memory/universe.hpp"
59 #include "oops/compressedOops.inline.hpp"
60 #include "oops/fieldStreams.inline.hpp"
61 #include "oops/objArrayOop.inline.hpp"
62 #include "oops/oop.inline.hpp"
63 #include "oops/oopHandle.inline.hpp"
64 #include "oops/typeArrayOop.inline.hpp"
65 #include "prims/jvmtiExport.hpp"
66 #include "runtime/arguments.hpp"
67 #include "runtime/fieldDescriptor.inline.hpp"
68 #include "runtime/init.hpp"
69 #include "runtime/javaCalls.hpp"
70 #include "runtime/mutexLocker.hpp"
71 #include "runtime/safepointVerifiers.hpp"
72 #include "utilities/bitMap.inline.hpp"
73 #include "utilities/copy.hpp"
74 #if INCLUDE_G1GC
75 #include "gc/g1/g1CollectedHeap.hpp"
76 #endif
77
78 #if INCLUDE_CDS_JAVA_HEAP
79
80 struct ArchivableStaticFieldInfo {
81 const char* klass_name;
82 const char* field_name;
83 InstanceKlass* klass;
84 int offset;
85 BasicType type;
86
87 ArchivableStaticFieldInfo(const char* k, const char* f)
88 : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}
89
90 bool valid() {
91 return klass_name != nullptr;
92 }
93 };
94
95 class HeapShared::ContextMark : public StackObj {
96 ResourceMark rm;
97 public:
98 ContextMark(const char* c) : rm{} {
99 _context->push(c);
100 }
101 ~ContextMark() {
102 _context->pop();
103 }
104 };
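// Usage sketch (hypothetical caller): a scope such as
//   { HeapShared::ContextMark cm("java/lang/Integer::archivedCache"); ... }
// pushes the label onto _context for the scope's duration, so diagnostics
// emitted while walking that subgraph can report the surrounding context.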
105
106 DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
107
108 size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
109 size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
110 size_t HeapShared::_total_obj_count;
111 size_t HeapShared::_total_obj_size;
112
113 #ifndef PRODUCT
114 #define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
115 static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
116 static const char* _test_class_name = nullptr;
117 static Klass* _test_class = nullptr;
118 static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
119 #endif
120
121
122 //
123 // If you add new entries to the following tables, you should know what you're doing!
124 //
125
126 static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
127 {"java/lang/Integer$IntegerCache", "archivedCache"},
128 {"java/lang/Long$LongCache", "archivedCache"},
129 {"java/lang/Byte$ByteCache", "archivedCache"},
130 {"java/lang/Short$ShortCache", "archivedCache"},
131 {"java/lang/Character$CharacterCache", "archivedCache"},
132 {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
133 {"sun/util/locale/BaseLocale", "constantBaseLocales"},
134 {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
135 {"java/util/ImmutableCollections", "archivedObjects"},
136 {"java/lang/ModuleLayer", "EMPTY_LAYER"},
137 {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
138 {"jdk/internal/math/FDBigInteger", "archivedCaches"},
139 {"java/lang/reflect/Proxy$ProxyBuilder", "archivedData"}, // FIXME -- requires AOTClassLinking
140
141 #ifndef PRODUCT
142 {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
143 #endif
144 {nullptr, nullptr},
145 };
146
147 // full module graph
148 static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
149 {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
150 {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
151 {"java/lang/Module$ArchivedData", "archivedData"},
152 {nullptr, nullptr},
153 };
154
155 KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
156 ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
157 GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
158 GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
159 GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
160 int HeapShared::_root_segment_max_size_elems;
161 OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
162 MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
163
164 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
165 for (int i = 0; fields[i].valid(); i++) {
166 if (fields[i].klass == ik) {
167 return true;
168 }
169 }
170 return false;
171 }
172
173 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
174 return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
175 is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
176 }
177
178 oop HeapShared::CachedOopInfo::orig_referrer() const {
179 return _orig_referrer.resolve();
180 }
181
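// The cache hashes keys by the referenced object's current address (see
// oop_handle_hash_raw() below), so a GC that moves objects leaves entries in
// the wrong buckets; rebuilding the table re-inserts them under the new addresses.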
182 void HeapShared::rehash_archived_object_cache() {
183 if (!CDSConfig::is_dumping_heap()) {
184 return;
185 }
186 assert(SafepointSynchronize::is_at_safepoint() ||
187 JavaThread::current()->is_in_no_safepoint_scope(), "sanity");
188
189 ArchivedObjectCache* new_cache =
190 new (mtClass)ArchivedObjectCache(archived_object_cache()->table_size(), MAX_TABLE_SIZE);
191
192 archived_object_cache()->iterate_all([&](OopHandle o, CachedOopInfo& info) {
193 new_cache->put_when_absent(o, info);
194 });
195
196 delete _archived_object_cache;
197 _archived_object_cache = new_cache;
198 }
199
200 unsigned HeapShared::oop_hash(oop const& p) {
201 assert(SafepointSynchronize::is_at_safepoint() ||
202 JavaThread::current()->is_in_no_safepoint_scope(), "sanity");
203 // Do not call p->identity_hash() as that will update the
204 // object header.
205 return primitive_hash(cast_from_oop<intptr_t>(p));
206 }
207
208 unsigned int HeapShared::oop_handle_hash_raw(const OopHandle& oh) {
209 return oop_hash(oh.resolve());
210 }
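// Unlike oop_handle_hash() below, which uses the GC-stable identity hash, the
// raw variant above hashes the object's current address and is therefore only
// valid while the object cannot move.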
211
212 unsigned int HeapShared::oop_handle_hash(const OopHandle& oh) {
213 oop o = oh.resolve();
214 if (o == nullptr) {
215 return 0;
216 } else {
217 return o->identity_hash();
218 }
219 }
220
221 bool HeapShared::oop_handle_equals(const OopHandle& a, const OopHandle& b) {
222 return a.resolve() == b.resolve();
223 }
224
225 static void reset_states(oop obj, TRAPS) {
226 Handle h_obj(THREAD, obj);
227 InstanceKlass* klass = InstanceKlass::cast(obj->klass());
228 TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
229 Symbol* method_sig = vmSymbols::void_method_signature();
230
231 while (klass != nullptr) {
232 Method* method = klass->find_method(method_name, method_sig);
233 if (method != nullptr) {
234 assert(method->is_private(), "must be");
235 if (log_is_enabled(Debug, aot)) {
236 ResourceMark rm(THREAD);
237 log_debug(aot)(" calling %s", method->name_and_sig_as_C_string());
238 }
239 JavaValue result(T_VOID);
240 JavaCalls::call_special(&result, h_obj, klass,
241 method_name, method_sig, CHECK);
242 }
243 klass = klass->super();
244 }
256 // to keep track of resources, etc., loaded by the null class loader.
257 //
258 // Note, this object is non-null, and is not the same as
259 // ClassLoaderData::the_null_class_loader_data()->class_loader(),
260 // which is null.
261 log_debug(aot)("Resetting boot loader");
262 JavaValue result(T_OBJECT);
263 JavaCalls::call_static(&result,
264 vmClasses::jdk_internal_loader_ClassLoaders_klass(),
265 vmSymbols::bootLoader_name(),
266 vmSymbols::void_BuiltinClassLoader_signature(),
267 CHECK);
268 Handle boot_loader(THREAD, result.get_oop());
269 reset_states(boot_loader(), CHECK);
270 }
271
272 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
273
274 bool HeapShared::has_been_archived(oop obj) {
275 assert(CDSConfig::is_dumping_heap(), "dump-time only");
276 OopHandle oh(&obj);
277 return archived_object_cache()->get(oh) != nullptr;
278 }
279
280 int HeapShared::append_root(oop obj) {
281 assert(CDSConfig::is_dumping_heap(), "dump-time only");
282 if (obj != nullptr) {
283 assert(has_been_archived(obj), "must be");
284 }
285 // No GC should happen since we aren't scanning _pending_roots.
286 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
287
288 OopHandle oh(Universe::vm_global(), obj);
289 return _pending_roots->append(oh);
290 }
291
292 objArrayOop HeapShared::root_segment(int segment_idx) {
293 if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
294 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
295 } else {
296 assert(CDSConfig::is_using_archive(), "must be");
297 }
298
299 objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
300 assert(segment != nullptr, "should have been initialized");
301 return segment;
302 }
303
304 class OrigToScratchObjectTable: public HashTable<OopHandle, OopHandle,
305 36137, // prime number
306 AnyObj::C_HEAP,
307 mtClassShared,
308 HeapShared::oop_handle_hash,
309 HeapShared::oop_handle_equals> {};
310
311 static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;
312
313 void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
314 MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
315 if (_orig_to_scratch_object_table == nullptr) {
316 _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
317 }
318
319 OopHandle orig_h(Universe::vm_global(), orig_obj);
320 OopHandle scratch_h(Universe::vm_global(), scratch_obj);
321 _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
322 }
323
324 oop HeapShared::orig_to_scratch_object(oop orig_obj) {
325 MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
326 if (_orig_to_scratch_object_table != nullptr) {
327 OopHandle orig(&orig_obj);
328 OopHandle* v = _orig_to_scratch_object_table->get(orig);
329 if (v != nullptr) {
330 return v->resolve();
331 }
332 }
333 return nullptr;
334 }
335
336 // Permanent oops are used to support AOT-compiled methods, which may have in-line references
337 // to Strings and MH oops.
338 //
339 // At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
340 // and are accessed via AOTCacheAccess::get_archived_object(int).
341 struct PermanentOopInfo {
342 int _index; // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
343 int _heap_offset; // Offset of the object from the bottom of the archived heap.
344 PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
345 };
346
347 class PermanentOopTable: public HashTable<OopHandle, PermanentOopInfo,
348 36137, // prime number
349 AnyObj::C_HEAP,
350 mtClassShared,
351 HeapShared::oop_handle_hash,
352 HeapShared::oop_handle_equals> {};
353
354 static int _dumptime_permanent_oop_count = 0;
355 static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
356 static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;
357
358 // ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
359 // so we can remember their offset (from the bottom of the archived heap).
360 void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
361 assert_at_safepoint();
362 if (_dumptime_permanent_oop_table == nullptr) {
363 _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
364 }
365
366 PermanentOopInfo info(-1, offset);
367 OopHandle oh(Universe::vm_global(), obj);
368 _dumptime_permanent_oop_table->put_when_absent(oh, info);
369 }
370
371 // A permanent index is assigned to an archived object ONLY when
372 // the AOT compiler calls this function.
373 int HeapShared::get_archived_object_permanent_index(oop obj) {
374 MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
375
376 if (!CDSConfig::is_dumping_heap()) {
377 return -1; // Called by the Leyden old workflow
378 }
379 if (_dumptime_permanent_oop_table == nullptr) {
380 return -1;
381 }
382
383 if (_orig_to_scratch_object_table != nullptr) {
384 OopHandle orig(&obj);
385 OopHandle* v = _orig_to_scratch_object_table->get(orig);
386 if (v != nullptr) {
387 obj = v->resolve();
388 }
389 }
390
391 OopHandle tmp(&obj);
392 PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
393 if (info == nullptr) {
394 return -1;
395 } else {
396 if (info->_index < 0) {
397 info->_index = _dumptime_permanent_oop_count++;
398 }
399 return info->_index;
400 }
401 }
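// Because indexes are handed out in call order from _dumptime_permanent_oop_count,
// the assigned indexes are dense in [0, count), which dumptime_init_internal()
// below relies on when sizing its offset table.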
402
403 oop HeapShared::get_archived_object(int permanent_index) {
404 assert(permanent_index >= 0, "sanity");
405 assert(ArchiveHeapLoader::is_in_use(), "sanity");
406 assert(_runtime_permanent_oops != nullptr, "sanity");
407
408 return _runtime_permanent_oops->at(permanent_index).resolve();
409 }
410
411 // Remember all archived heap objects that have a permanent index.
412 // table[i] = offset of oop whose permanent index is i.
413 void CachedCodeDirectoryInternal::dumptime_init_internal() {
414 const int count = _dumptime_permanent_oop_count;
415 if (count == 0) {
416     // Avoid confusing CDS code with zero-sized tables; just return.
417 log_info(cds)("No permanent oops");
418 _permanent_oop_count = count;
419 _permanent_oop_offsets = nullptr;
420 return;
421 }
422
423 int* table = (int*)AOTCacheAccess::allocate_aot_code_region(count * sizeof(int));
424 for (int i = 0; i < count; i++) {
425     table[i] = -1;
426 }
427 _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
428 int index = info._index;
429 if (index >= 0) {
430 assert(index < count, "sanity");
431 table[index] = info._heap_offset;
432 }
433 return true; // continue
434 });
435
436 for (int i = 0; i < count; i++) {
437 assert(table[i] >= 0, "must be");
438 }
439
440 log_info(cds)("Dumped %d permanent oops", count);
441
442 _permanent_oop_count = count;
443 AOTCacheAccess::set_pointer(&_permanent_oop_offsets, table);
444 }
445
446 // This is called during the bootstrap of the production run, before any GC can happen.
447 // Record each permanent oop in an OopHandle for GC safety.
448 void CachedCodeDirectoryInternal::runtime_init_internal() {
449 int count = _permanent_oop_count;
450 int* table = _permanent_oop_offsets;
451 _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
452 for (int i = 0; i < count; i++) {
453 oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
454 OopHandle oh(Universe::vm_global(), obj);
455 _runtime_permanent_oops->append(oh);
456 }
457 }
458
459 void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
460 assert(_root_segment_max_size_elems > 0, "sanity");
461
462 // Try to avoid divisions for the common case.
463 if (idx < _root_segment_max_size_elems) {
464 seg_idx = 0;
465 int_idx = idx;
466 } else {
467 seg_idx = idx / _root_segment_max_size_elems;
468 int_idx = idx % _root_segment_max_size_elems;
469 }
470
471 assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
472 "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
473 }
474
475 // Returns the archived object root at the given index (optionally clearing the slot)
476 oop HeapShared::get_root(int index, bool clear) {
477 assert(index >= 0, "sanity");
478 assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
500 }
501 }
502
503 bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info) {
504 assert(CDSConfig::is_dumping_heap(), "dump-time only");
505
506 assert(!obj->is_stackChunk(), "do not archive stack chunks");
507 if (has_been_archived(obj)) {
508 return true;
509 }
510
511 if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
512 log_debug(aot, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: %zu",
513 p2i(obj), obj->size());
514 debug_trace();
515 return false;
516 } else {
517 count_allocation(obj->size());
518 ArchiveHeapWriter::add_source_obj(obj);
519 CachedOopInfo info = make_cached_oop_info(obj, referrer);
520
521 OopHandle oh(Universe::vm_global(), obj);
522 archived_object_cache()->put_when_absent(oh, info);
523 archived_object_cache()->maybe_grow();
524 mark_native_pointers(obj);
525
526 Klass* k = obj->klass();
527 if (k->is_instance_klass()) {
528 // Whenever we see a non-array Java object of type X, we mark X to be aot-initialized.
529 // This ensures that during the production run, whenever Java code sees a cached object
530 // of type X, we know that X is already initialized. (see TODO comment below ...)
531
532 if (InstanceKlass::cast(k)->is_enum_subclass()
533 // We can't rerun <clinit> of enum classes (see cdsEnumKlass.cpp) so
534 // we must store them as AOT-initialized.
535 || (subgraph_info == _dump_time_special_subgraph))
536 // TODO: we do this only for the special subgraph for now. Extending this to
537 // other subgraphs would require more refactoring of the core library (such as
538 // move some initialization logic into runtimeSetup()).
539 //
540 // For the other subgraphs, we have a weaker mechanism to ensure that
541 // all classes in a subgraph are initialized before the subgraph is programmatically
542 // returned from jdk.internal.misc.CDS::initializeFromArchive().
608 OopHandle* handle = get(ptr);
609 if (handle != nullptr) {
610 handle->release(Universe::vm_global());
611 remove(ptr);
612 }
613 }
614 };
615
616 void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
617 if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
618 _scratch_objects_table->set_oop(src, dest);
619 }
620 }
621
622 objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
623 return (objArrayOop)_scratch_objects_table->get_oop(src);
624 }
625
626 void HeapShared::init_dumping() {
627 _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
628 _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
629 }
630
631 void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
632 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
633 BasicType bt = (BasicType)i;
634 if (!is_reference_type(bt)) {
635 oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
636 _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
637 track_scratch_object(Universe::java_mirror(bt), m);
638 }
639 }
640 }
641
642 // Given java_mirror that represents a (primitive or reference) type T,
643 // return the "scratch" version that represents the same type T.
644 // Note that java_mirror itself will be returned if it's already a
645 // scratch mirror.
646 //
647 // See java_lang_Class::create_scratch_mirror() for more info.
648 oop HeapShared::scratch_java_mirror(oop java_mirror) {
649 assert(java_lang_Class::is_instance(java_mirror), "must be");
650
651 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
652 BasicType bt = (BasicType)i;
653 if (!is_reference_type(bt)) {
654 if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
655 return java_mirror;
656 }
657 }
658 }
659
660 if (java_lang_Class::is_primitive(java_mirror)) {
661 return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
662 } else {
663 return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
664 }
665 }
666
667 oop HeapShared::scratch_java_mirror(BasicType t) {
668 assert((uint)t < T_VOID+1, "range check");
669 assert(!is_reference_type(t), "sanity");
670 return _scratch_basic_type_mirrors[t].resolve();
671 }
672
673 oop HeapShared::scratch_java_mirror(Klass* k) {
674 return _scratch_objects_table->get_oop(k);
675 }
676
677 void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
678 track_scratch_object(k->java_mirror(), mirror);
679 _scratch_objects_table->set_oop(k, mirror);
680 }
681
682 void HeapShared::remove_scratch_objects(Klass* k) {
683   // The Klass is being deallocated. Its Java mirror can still be alive, and it should not
684   // point to a dead Klass. We need to break the link from the mirror to the Klass.
685 // See how InstanceKlass::deallocate_contents does it for normal mirrors.
686 oop mirror = _scratch_objects_table->get_oop(k);
687 if (mirror != nullptr) {
688 java_lang_Class::set_klass(mirror, nullptr);
689 }
690 _scratch_objects_table->remove_oop(k);
691 if (k->is_instance_klass()) {
692 _scratch_objects_table->remove(InstanceKlass::cast(k)->constants());
693 }
694   if (mirror != nullptr && _orig_to_scratch_object_table != nullptr) {
695 OopHandle tmp(&mirror);
696 OopHandle* v = _orig_to_scratch_object_table->get(tmp);
697 if (v != nullptr) {
698 oop scratch_mirror = v->resolve();
699 java_lang_Class::set_klass(scratch_mirror, nullptr);
700 _orig_to_scratch_object_table->remove(tmp);
701 }
702 }
703 }
704
705 //TODO: we eventually want a more direct test for these kinds of things.
706 //For example the JVM could record some bit of context from the creation
707 //of the klass, such as who called the hidden class factory. Using
708 //string compares on names is fragile and will break as soon as somebody
709 //changes the names in the JDK code. See discussion in JDK-8342481 for
710 //related ideas about marking AOT-related classes.
711 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
712 return ik->is_hidden() &&
713 (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
714 ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
715 ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
716 ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
717 }
718
719 bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
720 return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
721 }
722
737 if (RegeneratedClasses::is_regenerated_object(ik)) {
738 InstanceKlass* orig_ik = RegeneratedClasses::get_original_object(ik);
739 precond(orig_ik->is_initialized());
740 orig_mirror = orig_ik->java_mirror();
741 } else {
742 precond(ik->is_initialized());
743 orig_mirror = ik->java_mirror();
744 }
745
746 oop m = scratch_java_mirror(ik);
747 int nfields = 0;
748 for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
749 if (fs.access_flags().is_static()) {
750 fieldDescriptor& fd = fs.field_descriptor();
751 int offset = fd.offset();
752 switch (fd.field_type()) {
753 case T_OBJECT:
754 case T_ARRAY:
755 {
756 oop field_obj = orig_mirror->obj_field(offset);
757 m->obj_field_put(offset, field_obj);
758 if (field_obj != nullptr) {
759 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, field_obj);
760 assert(success, "sanity");
761 }
762 }
763 break;
764 case T_BOOLEAN:
765 m->bool_field_put(offset, orig_mirror->bool_field(offset));
766 break;
767 case T_BYTE:
768 m->byte_field_put(offset, orig_mirror->byte_field(offset));
769 break;
770 case T_SHORT:
771 m->short_field_put(offset, orig_mirror->short_field(offset));
772 break;
773 case T_CHAR:
774 m->char_field_put(offset, orig_mirror->char_field(offset));
775 break;
776       case T_INT:
777         m->int_field_put(offset, orig_mirror->int_field(offset));
778         break;
779       case T_LONG:
780         m->long_field_put(offset, orig_mirror->long_field(offset));
781         break;
782       case T_FLOAT:
783         m->float_field_put(offset, orig_mirror->float_field(offset));
784         break;
785       case T_DOUBLE:
786         m->double_field_put(offset, orig_mirror->double_field(offset));
787         break;
788       default:
789         ShouldNotReachHere();
790       }
791 nfields ++;
792 }
793 }
794
795 oop class_data = java_lang_Class::class_data(orig_mirror);
796 java_lang_Class::set_class_data(m, class_data);
797 if (class_data != nullptr) {
798 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, class_data);
799 assert(success, "sanity");
800 }
801
802 if (log_is_enabled(Debug, aot, init)) {
803 ResourceMark rm;
804 log_debug(aot, init)("copied %3d field(s) in aot-initialized mirror %s%s%s", nfields, ik->external_name(),
805 ik->is_hidden() ? " (hidden)" : "",
806 ik->is_enum_subclass() ? " (enum)" : "");
807 }
808 }
809
810 void HeapShared::copy_java_mirror(oop orig_mirror, oop scratch_m) {
811 // We need to retain the identity_hash, because it may have been used by some hashtables
812 // in the shared heap.
813 if (!orig_mirror->fast_no_hash_check()) {
814 intptr_t src_hash = orig_mirror->identity_hash();
815 if (UseCompactObjectHeaders) {
816 narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
817 scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
818 } else {
819 scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
820 }
821 assert(scratch_m->mark().is_unlocked(), "sanity");
822
823 DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
824 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
825 }
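  // For example, without the copy_set_hash() call above, the following sketch
  // would likely fail, because identity_hash() lazily installs a fresh hash in
  // the markWord of an object that does not have one yet:
  //   intptr_t h1 = orig_mirror->identity_hash(); // hash already installed above
  //   intptr_t h2 = scratch_m->identity_hash();   // would mint a new value
  //   assert(h1 == h2, "holds only because the hash was copied into scratch_m");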
826
827   Klass* k = java_lang_Class::as_Klass(orig_mirror); // is null for Universe::void_mirror()
828 if (CDSConfig::is_dumping_reflection_data() &&
829 k != nullptr && k->is_instance_klass() &&
830 java_lang_Class::reflection_data(orig_mirror) != nullptr &&
831 AOTConstantPoolResolver::can_archive_reflection_data(InstanceKlass::cast(k))) {
832 java_lang_Class::set_reflection_data(scratch_m, java_lang_Class::reflection_data(orig_mirror));
833 }
834 }
835
836 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
837 if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
838 objArrayOop rr = src_ik->constants()->resolved_references_or_null();
839 if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
840 return HeapShared::scratch_resolved_references(src_ik->constants());
841 }
842 }
843 return nullptr;
844 }
845
846 void HeapShared::archive_strings() {
847 oop shared_strings_array = StringTable::init_shared_strings_array();
848 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
849 assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
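  // append_root() returns an index into the archived heap roots; the StringTable
  // records this index so the runtime can locate the shared strings array again.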
850 StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
851 }
852
853 int HeapShared::archive_exception_instance(oop exception) {
854 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
855 assert(success, "sanity");
856 return append_root(exception);
857 }
858
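// The fields marked below hold raw Metadata pointers rather than oops: a
// java.lang.Class instance embeds the Klass* it represents (and its array class),
// and a ResolvedMethodName embeds a Method* in its vmtarget field. Recording the
// locations of these pointers lets ArchiveHeapWriter relocate them when the
// archive is mapped at runtime.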
859 void HeapShared::mark_native_pointers(oop orig_obj) {
860 if (java_lang_Class::is_instance(orig_obj)) {
861 ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
862 ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
863 } else if (java_lang_invoke_ResolvedMethodName::is_instance(orig_obj)) {
864 ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_invoke_ResolvedMethodName::vmtarget_offset());
865 }
866 }
867
868 void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
869 OopHandle oh(&src_obj);
870 CachedOopInfo* info = archived_object_cache()->get(oh);
871 assert(info != nullptr, "must be");
872 has_oop_pointers = info->has_oop_pointers();
873 has_native_pointers = info->has_native_pointers();
874 }
875
876 void HeapShared::set_has_native_pointers(oop src_obj) {
877 OopHandle oh(&src_obj);
878 CachedOopInfo* info = archived_object_cache()->get(oh);
879 assert(info != nullptr, "must be");
880 info->set_has_native_pointers();
881 }
882
883 // Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
884 // should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
885 void HeapShared::start_scanning_for_oops() {
886 {
887 NoSafepointVerifier nsv;
888
889 // The special subgraph doesn't belong to any class. We use Object_klass() here just
890 // for convenience.
891 _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
892 _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);
893
894 // Cache for recording where the archived objects are copied to
895 create_archived_object_cache();
896
897 if (UseCompressedOops || UseG1GC) {
898 aot_log_info(aot)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
899 UseCompressedOops ? p2i(CompressedOops::begin()) :
900 p2i((address)G1CollectedHeap::heap()->reserved().start()),
901 UseCompressedOops ? p2i(CompressedOops::end()) :
902 p2i((address)G1CollectedHeap::heap()->reserved().end()));
903 }
904
905 archive_subgraphs();
906 }
907
908 init_seen_objects_table();
909 Universe::archive_exception_instances();
910 }
911
912 void HeapShared::end_scanning_for_oops() {
913 archive_strings();
914 delete_seen_objects_table();
915 }
916
917 void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
918 {
919 NoSafepointVerifier nsv;
920 if (!SkipArchiveHeapVerification) {
921 CDSHeapVerifier::verify();
922 }
923 check_special_subgraph_classes();
924 }
925
926 StringTable::write_shared_table();
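  // Resolve the pending root OopHandles into raw oops for the heap writer.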
927 GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
928 for (int i = 0; i < _pending_roots->length(); i++) {
929 roots->append(_pending_roots->at(i).resolve());
930 }
931 ArchiveHeapWriter::write(roots, heap_info);
932 delete roots;
933
934 ArchiveBuilder::OtherROAllocMark mark;
935 write_subgraph_info_table();
936 }
937
938 void HeapShared::scan_java_mirror(oop orig_mirror) {
939 oop m = scratch_java_mirror(orig_mirror);
940   if (m != nullptr) { // m is nullptr if the class was loaded by a custom class loader
941 copy_java_mirror(orig_mirror, m);
942 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
943 assert(success, "sanity");
944
945 oop extra;
946 if ((extra = java_lang_Class::reflection_data(m)) != nullptr) {
947 success = archive_reachable_objects_from(1, _dump_time_special_subgraph, extra);
948 assert(success, "sanity");
949 }
950 }
951 }
952
953 void HeapShared::scan_java_class(Klass* orig_k) {
954 scan_java_mirror(orig_k->java_mirror());
955
956 if (orig_k->is_instance_klass()) {
957 InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
958 orig_ik->constants()->prepare_resolved_references_for_archiving();
959 objArrayOop rr = get_archived_resolved_references(orig_ik);
960 if (rr != nullptr) {
961 bool success = HeapShared::archive_reachable_objects_from(1, _dump_time_special_subgraph, rr);
962 assert(success, "must be");
963 }
964 }
965 }
966
967 void HeapShared::archive_subgraphs() {
968 assert(CDSConfig::is_dumping_heap(), "must be");
969
1324 which, k->external_name());
1325 FlagSetting fs1(VerifyBeforeGC, true);
1326 FlagSetting fs2(VerifyDuringGC, true);
1327 FlagSetting fs3(VerifyAfterGC, true);
1328 Universe::heap()->collect(GCCause::_java_lang_system_gc);
1329 }
1330 }
1331 }
1332
1333 // Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
1334 // have a valid klass. I.e., oopDesc::klass() must have already been resolved.
1335 //
1336 // Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
1337 // ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
1338 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
1339 void HeapShared::resolve_classes(JavaThread* current) {
1340 assert(CDSConfig::is_using_archive(), "runtime only!");
1341 if (!ArchiveHeapLoader::is_in_use()) {
1342 return; // nothing to do
1343 }
1344
1345 if (!CDSConfig::is_using_aot_linked_classes()) {
1346     assert(_run_time_special_subgraph != nullptr, "must be");
1347 Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
1348 if (klasses != nullptr) {
1349 for (int i = 0; i < klasses->length(); i++) {
1350 Klass* k = klasses->at(i);
1351 ExceptionMark em(current); // no exception can happen here
1352 resolve_or_init(k, /*do_init*/false, current);
1353 }
1354 }
1355 }
1356
1357 resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
1358 resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
1359 }
1360
1361 void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
1362 for (int i = 0; fields[i].valid(); i++) {
1363 ArchivableStaticFieldInfo* info = &fields[i];
1364 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
1365 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
1366 assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
1367 resolve_classes_for_subgraph_of(current, k);
1368 }
1369 }
1370
1371 void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
1372 JavaThread* THREAD = current;
1373 ExceptionMark em(THREAD);
1374 const ArchivedKlassSubGraphInfoRecord* record =
1375 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
1376 if (HAS_PENDING_EXCEPTION) {
1695 };
1696
1697 // Checks whether an object contains any non-null oop pointers (fields or array elements)
1698 class PointsToOopsChecker : public BasicOopIterateClosure {
1699 bool _result;
1700
1701 template <class T> void check(T *p) {
1702 _result |= (HeapAccess<>::oop_load(p) != nullptr);
1703 }
1704
1705 public:
1706 PointsToOopsChecker() : _result(false) {}
1707 void do_oop(narrowOop *p) { check(p); }
1708 void do_oop( oop *p) { check(p); }
1709 bool result() { return _result; }
1710 };
1711
1712 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
1713 PointsToOopsChecker points_to_oops_checker;
1714 obj->oop_iterate(&points_to_oops_checker);
1715 return CachedOopInfo(OopHandle(Universe::vm_global(), referrer), points_to_oops_checker.result());
1716 }
1717
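// Archived heap subgraphs may reference instances of the box classes (for
// example, entries of the Integer.valueOf() cache), so the box classes are
// initialized eagerly whenever the archived heap is in use.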
1718 void HeapShared::init_box_classes(TRAPS) {
1719 if (ArchiveHeapLoader::is_in_use()) {
1720 vmClasses::Boolean_klass()->initialize(CHECK);
1721 vmClasses::Character_klass()->initialize(CHECK);
1722 vmClasses::Float_klass()->initialize(CHECK);
1723 vmClasses::Double_klass()->initialize(CHECK);
1724 vmClasses::Byte_klass()->initialize(CHECK);
1725 vmClasses::Short_klass()->initialize(CHECK);
1726 vmClasses::Integer_klass()->initialize(CHECK);
1727 vmClasses::Long_klass()->initialize(CHECK);
1728 vmClasses::Void_klass()->initialize(CHECK);
1729 }
1730 }
1731
1732 void HeapShared::exit_on_error() {
1733 if (_context != nullptr) {
1734 ResourceMark rm;
1735 LogStream ls(Log(cds, heap)::error());
1736 ls.print_cr("Context");
1737 for (int i = 0; i < _context->length(); i++) {
1738 const char* s = _context->at(i);
1739 ls.print_cr("- %s", s);
1740 }
1741 }
1742 debug_trace();
1743 AOTMetaspace::unrecoverable_writing_error();
1744 }
1745
1746 // (1) If orig_obj has not been archived yet, archive it.
1747 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
1748 // trace all objects that are reachable from it, and make sure these objects are archived.
1749 // (3) Record the klasses of all objects that are reachable from orig_obj (including those that
1750 // were already archived when this function is called)
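// The walk below is iterative: an explicit PendingOopStack stands in for
// recursion, so deeply linked object graphs (e.g., long linked lists) cannot
// overflow the native stack during archiving.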
1751 bool HeapShared::archive_reachable_objects_from(int level,
1752 KlassSubGraphInfo* subgraph_info,
1753 oop orig_obj) {
1754 assert(orig_obj != nullptr, "must be");
1755 PendingOopStack stack;
1756 stack.push(PendingOop(orig_obj, nullptr, level));
1757
1758 while (stack.length() > 0) {
1759 PendingOop po = stack.pop();
1760 _object_being_archived = po;
1761 bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
1762 _object_being_archived = PendingOop();
1763
1764 if (!status) {
1765       // Don't archive a subgraph root that's too big. For archived static fields, that's OK
1990 verify_subgraph_from(f);
1991 }
1992 }
1993
1994 void HeapShared::verify_subgraph_from(oop orig_obj) {
1995 if (!has_been_archived(orig_obj)) {
1996     // It's OK for the root of a subgraph not to be archived. See comments in
1997 // archive_reachable_objects_from().
1998 return;
1999 }
2000
2001 // Verify that all objects reachable from orig_obj are archived.
2002 init_seen_objects_table();
2003 verify_reachable_objects_from(orig_obj);
2004 delete_seen_objects_table();
2005 }
2006
2007 void HeapShared::verify_reachable_objects_from(oop obj) {
2008 _num_total_verifications ++;
2009 if (java_lang_Class::is_instance(obj)) {
2010 Klass* k = java_lang_Class::as_Klass(obj);
2011 if (RegeneratedClasses::has_been_regenerated(k)) {
2012 k = RegeneratedClasses::get_regenerated_object(k);
2013 obj = k->java_mirror();
2014 }
2015 obj = scratch_java_mirror(obj);
2016 assert(obj != nullptr, "must be");
2017 }
2018 if (!has_been_seen_during_subgraph_recording(obj)) {
2019 set_has_been_seen_during_subgraph_recording(obj);
2020 assert(has_been_archived(obj), "must be");
2021 VerifySharedOopClosure walker;
2022 obj->oop_iterate(&walker);
2023 }
2024 }
2025 #endif
2026
2027 void HeapShared::check_special_subgraph_classes() {
2028 if (CDSConfig::is_initing_classes_at_dump_time()) {
2029 // We can have aot-initialized classes (such as Enums) that can reference objects
2030 // of arbitrary types. Currently, we trust the JEP 483 implementation to only
2031 // aot-initialize classes that are "safe".
2032 //
2033 // TODO: we need an automatic tool that checks the safety of aot-initialized
2034 // classes (when we extend the set of aot-initialized classes beyond JEP 483)
2307
2308 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2309 bool is_full_module_graph) {
2310 _num_total_subgraph_recordings = 0;
2311 _num_total_walked_objs = 0;
2312 _num_total_archived_objs = 0;
2313 _num_total_recorded_klasses = 0;
2314 _num_total_verifications = 0;
2315
2316 // For each class X that has one or more archived fields:
2317 // [1] Dump the subgraph of each archived field
2318 //     [2] Create a list of all the classes of the objects that can be reached
2319 // by any of these static fields.
2320 // At runtime, these classes are initialized before X's archived fields
2321 // are restored by HeapShared::initialize_from_archived_subgraph().
2322 for (int i = 0; fields[i].valid(); ) {
2323 ArchivableStaticFieldInfo* info = &fields[i];
2324 const char* klass_name = info->klass_name;
2325 start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2326
2327 ContextMark cm(klass_name);
2328 // If you have specified consecutive fields of the same klass in
2329 // fields[], these will be archived in the same
2330 // {start_recording_subgraph ... done_recording_subgraph} pass to
2331 // save time.
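    // For instance, with entries like the following (hypothetical examples):
    //   {"java/lang/Integer$IntegerCache", "archivedCache"},
    //   {"java/lang/Integer$IntegerCache", "someOtherField"},   // same klass
    //   {"java/util/ImmutableCollections", "archivedObjects"},
    // the two IntegerCache entries share a single recording pass.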
2332 for (; fields[i].valid(); i++) {
2333 ArchivableStaticFieldInfo* f = &fields[i];
2334 if (f->klass_name != klass_name) {
2335 break;
2336 }
2337
2338 ContextMark cm(f->field_name);
2339 archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2340 f->offset, f->field_name);
2341 }
2342 done_recording_subgraph(info->klass, klass_name);
2343 }
2344
2345 log_info(aot, heap)("Archived subgraph records = %d",
2346 _num_total_subgraph_recordings);
2347 log_info(aot, heap)(" Walked %d objects", _num_total_walked_objs);
2348 log_info(aot, heap)(" Archived %d objects", _num_total_archived_objs);
2349 log_info(aot, heap)(" Recorded %d klasses", _num_total_recorded_klasses);
2350
2351 #ifndef PRODUCT
2352 for (int i = 0; fields[i].valid(); i++) {
2353 ArchivableStaticFieldInfo* f = &fields[i];
2354 verify_subgraph_from_static_field(f->klass, f->offset);
2355 }
2356 log_info(aot, heap)(" Verified %d references", _num_total_verifications);
2357 #endif
2358 }
2359
2360 // Keep track of the contents of the archived interned string table. This table
2361 // is used only by CDSHeapVerifier.
2362 void HeapShared::add_to_dumped_interned_strings(oop string) {
2363 assert_at_safepoint(); // DumpedInternedStrings uses raw oops
2364 assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
2365 bool created;
2366 _dumped_interned_strings->put_if_absent(string, true, &created);
2367 if (created) {
2368 // Prevent string deduplication from changing the value field to
2369 // something not in the archive.
2370 java_lang_String::set_deduplication_forbidden(string);
2371 _dumped_interned_strings->maybe_grow();
2372 }
2373 }
2374
2375 bool HeapShared::is_dumped_interned_string(oop o) {
2376 return _dumped_interned_strings->get(o) != nullptr;
2377 }
2378
2379 // These tables should be used only within the CDS safepoint, so
2380 // delete them before we exit the safepoint. Otherwise the table will
2381 // contain bad oops after a GC.
2382 void HeapShared::delete_tables_with_raw_oops() {
2383 assert(_seen_objects_table == nullptr, "should have been deleted");
2384
2385 delete _dumped_interned_strings;
2386 _dumped_interned_strings = nullptr;
2387
2388 ArchiveHeapWriter::delete_tables_with_raw_oops();
2389 }
2390
2391 void HeapShared::debug_trace() {
2392 ResourceMark rm;
2393 oop referrer = _object_being_archived.referrer();
2394 if (referrer != nullptr) {
2395 LogStream ls(Log(aot, heap)::error());
2396 ls.print_cr("Reference trace");
2397 CDSHeapVerifier::trace_to_root(&ls, referrer);
2398 }
2399 }
2400
2401 #ifndef PRODUCT
2402 // At dump-time, find the location of all the non-null oop pointers in an archived heap
2403 // region. This way we can quickly relocate all the pointers without using
2404 // BasicOopIterateClosure at runtime.
2405 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
2406 void* _start;
2407 BitMap *_oopmap;
2408 int _num_total_oops;
2409 int _num_null_oops;
2410 public: