577 assert(success, "sanity");
578 }
579
580 if (log_is_enabled(Debug, aot, init)) {
581 ResourceMark rm;
582 log_debug(aot, init)("copied %3d field(s) in aot-initialized mirror %s%s%s", nfields, ik->external_name(),
583 ik->is_hidden() ? " (hidden)" : "",
584 ik->is_enum_subclass() ? " (enum)" : "");
585 }
586 }
587
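// Copy the identity hash code of the original java mirror into the scratch copy that will
// be written to the archive, so that hash-based data structures in the shared heap keep
// seeing the same hash at runtime.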
588 static void copy_java_mirror_hashcode(oop orig_mirror, oop scratch_m) {
589 // We need to retain the identity_hash, because it may have been used by some hashtables
590 // in the shared heap.
591 if (!orig_mirror->fast_no_hash_check()) {
592 intptr_t src_hash = orig_mirror->identity_hash();
593 if (UseCompactObjectHeaders) {
594 narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
595 scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
596 } else {
597 scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
598 }
599 assert(scratch_m->mark().is_unlocked(), "sanity");
600
601 DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
602 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
603 }
604 }
605
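// Return the scratch (archivable) copy of src_ik's resolved_references array, but only if
// src_ik was defined by a builtin class loader and the array is not too large to archive;
// otherwise return nullptr.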
606 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
607 if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
608 objArrayOop rr = src_ik->constants()->resolved_references_or_null();
609 if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
610 return HeapShared::scratch_resolved_references(src_ik->constants());
611 }
612 }
613 return nullptr;
614 }
615
616 void HeapShared::archive_strings() {
1285 return record;
1286 }
1287
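// Look up a builtin (boot-loaded) class by name. If it is present in the archive, resolve
// it; when do_init is true, also run its class initialization. Names that cannot be found
// are skipped silently.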
1288 void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
1289 TempNewSymbol klass_name_sym = SymbolTable::new_symbol(klass_name);
1290 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
1291 if (k == nullptr) {
1292 return;
1293 }
1294 assert(k->defined_by_boot_loader(), "sanity");
1295 resolve_or_init(k, false, CHECK);
1296 if (do_init) {
1297 resolve_or_init(k, true, CHECK);
1298 }
1299 }
1300
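// Two-phase helper: with do_init == false, resolve k through the SystemDictionary if it has
// not been restored into a class loader yet, and assert that the archived Klass was not
// replaced (e.g. by a JVMTI ClassFileLoadHook). With do_init == true, run normal class
// initialization for instance and object-array klasses.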
1301 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
1302 if (!do_init) {
1303 if (k->class_loader_data() == nullptr) {
1304 Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
1305 assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
1306 }
1307 } else {
1308 assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
1309 if (k->is_instance_klass()) {
1310 InstanceKlass* ik = InstanceKlass::cast(k);
1311 ik->initialize(CHECK);
1312 } else if (k->is_objArray_klass()) {
1313 ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1314 oak->initialize(CHECK);
1315 }
1316 }
1317 }
1318
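// Install the archived objects recorded for k's subgraph entry fields back into k's java
// mirror (entry_field_records presumably holds pairs of a static field offset and the index
// of the archived root object to store at that offset).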
1319 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1320 verify_the_heap(k, "before");
1321
1322 // Load the subgraph entry fields from the record and store them back to
1323 // the corresponding fields within the mirror.
1324 oop m = k->java_mirror();
1325 Array<int>* entry_field_records = record->entry_field_records();
2027 }
2028 }
2029
2030 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2031 bool is_full_module_graph) {
2032 _num_total_subgraph_recordings = 0;
2033 _num_total_walked_objs = 0;
2034 _num_total_archived_objs = 0;
2035 _num_total_recorded_klasses = 0;
2036 _num_total_verifications = 0;
2037
2038 // For each class X that has one or more archived fields:
2039 // [1] Dump the subgraph of each archived field
2040   // [2] Create a list of all the classes of the objects that can be reached
2041 // by any of these static fields.
2042 // At runtime, these classes are initialized before X's archived fields
2043 // are restored by HeapShared::initialize_from_archived_subgraph().
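  // (For illustration: fields[] is assumed to be one of the statically defined tables in
  //  this file, with {klass_name, field_name} entries along the lines of
  //  {"java/lang/Integer$IntegerCache", "archivedCache"}, terminated by an entry for which
  //  valid() is false.)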
2044 for (int i = 0; fields[i].valid(); ) {
2045 ArchivableStaticFieldInfo* info = &fields[i];
2046 const char* klass_name = info->klass_name;
2047 start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2048
2049     // Consecutive entries in fields[] that belong to the same klass are
2050     // archived in the same
2051     // {start_recording_subgraph ... done_recording_subgraph} pass to
2052     // save time.
2053 for (; fields[i].valid(); i++) {
2054 ArchivableStaticFieldInfo* f = &fields[i];
2055 if (f->klass_name != klass_name) {
2056 break;
2057 }
2058
2059 archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2060 f->offset, f->field_name);
2061 }
2062 done_recording_subgraph(info->klass, klass_name);
2063 }
2064
2065 log_info(aot, heap)("Archived subgraph records = %d",
2066 _num_total_subgraph_recordings);
|
577 assert(success, "sanity");
578 }
579
580 if (log_is_enabled(Debug, aot, init)) {
581 ResourceMark rm;
582 log_debug(aot, init)("copied %3d field(s) in aot-initialized mirror %s%s%s", nfields, ik->external_name(),
583 ik->is_hidden() ? " (hidden)" : "",
584 ik->is_enum_subclass() ? " (enum)" : "");
585 }
586 }
587
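// Copy the identity hash code of the original java mirror into the scratch copy that will
// be written to the archive, so that hash-based data structures in the shared heap keep
// seeing the same hash at runtime.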
588 static void copy_java_mirror_hashcode(oop orig_mirror, oop scratch_m) {
589 // We need to retain the identity_hash, because it may have been used by some hashtables
590 // in the shared heap.
591 if (!orig_mirror->fast_no_hash_check()) {
592 intptr_t src_hash = orig_mirror->identity_hash();
593 if (UseCompactObjectHeaders) {
594 narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
595 scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
596 } else {
597       // For Valhalla, the prototype header is the same as markWord::prototype().
598 scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
599 }
600 assert(scratch_m->mark().is_unlocked(), "sanity");
601
602 DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
603 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
604 }
605 }
606
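// Return the scratch (archivable) copy of src_ik's resolved_references array, but only if
// src_ik was defined by a builtin class loader and the array is not too large to archive;
// otherwise return nullptr.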
607 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
608 if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
609 objArrayOop rr = src_ik->constants()->resolved_references_or_null();
610 if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
611 return HeapShared::scratch_resolved_references(src_ik->constants());
612 }
613 }
614 return nullptr;
615 }
616
617 void HeapShared::archive_strings() {
1286 return record;
1287 }
1288
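// Look up a builtin (boot-loaded) class by name. If it is present in the archive, resolve
// it; when do_init is true, also run its class initialization. Names that cannot be found
// are skipped silently.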
1289 void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
1290 TempNewSymbol klass_name_sym = SymbolTable::new_symbol(klass_name);
1291 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
1292 if (k == nullptr) {
1293 return;
1294 }
1295 assert(k->defined_by_boot_loader(), "sanity");
1296 resolve_or_init(k, false, CHECK);
1297 if (do_init) {
1298 resolve_or_init(k, true, CHECK);
1299 }
1300 }
1301
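// Two-phase helper: with do_init == false, resolve k through the SystemDictionary if it has
// not been restored into a class loader yet, and assert that the archived Klass was not
// replaced (e.g. by a JVMTI ClassFileLoadHook). With do_init == true, run normal class
// initialization for instance and object-array klasses.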
1302 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
1303 if (!do_init) {
1304 if (k->class_loader_data() == nullptr) {
1305 Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
1306 if (resolved_k->is_array_klass()) {
1307 assert(resolved_k == k || resolved_k == k->super(), "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
1308 } else {
1309 assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
1310 }
1311 }
1312 } else {
1313 assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
1314 if (k->is_instance_klass()) {
1315 InstanceKlass* ik = InstanceKlass::cast(k);
1316 ik->initialize(CHECK);
1317 } else if (k->is_objArray_klass()) {
1318 ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1319 oak->initialize(CHECK);
1320 }
1321 }
1322 }
1323
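// Install the archived objects recorded for k's subgraph entry fields back into k's java
// mirror (entry_field_records presumably holds pairs of a static field offset and the index
// of the archived root object to store at that offset).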
1324 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1325 verify_the_heap(k, "before");
1326
1327 // Load the subgraph entry fields from the record and store them back to
1328 // the corresponding fields within the mirror.
1329 oop m = k->java_mirror();
1330 Array<int>* entry_field_records = record->entry_field_records();
2032 }
2033 }
2034
2035 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2036 bool is_full_module_graph) {
2037 _num_total_subgraph_recordings = 0;
2038 _num_total_walked_objs = 0;
2039 _num_total_archived_objs = 0;
2040 _num_total_recorded_klasses = 0;
2041 _num_total_verifications = 0;
2042
2043 // For each class X that has one or more archived fields:
2044 // [1] Dump the subgraph of each archived field
2045   // [2] Create a list of all the classes of the objects that can be reached
2046 // by any of these static fields.
2047 // At runtime, these classes are initialized before X's archived fields
2048 // are restored by HeapShared::initialize_from_archived_subgraph().
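  // (For illustration: fields[] is assumed to be one of the statically defined tables in
  //  this file, with {klass_name, field_name} entries along the lines of
  //  {"java/lang/Integer$IntegerCache", "archivedCache"}, terminated by an entry for which
  //  valid() is false.)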
2049 for (int i = 0; fields[i].valid(); ) {
2050 ArchivableStaticFieldInfo* info = &fields[i];
2051 const char* klass_name = info->klass_name;
2052
2053 if (CDSConfig::is_valhalla_preview() && strcmp(klass_name, "jdk/internal/module/ArchivedModuleGraph") == 0) {
2054 // FIXME -- ArchivedModuleGraph doesn't work when java.base is patched with valhalla classes.
2055 i++;
2056 continue;
2057 }
2058
2059 start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2060
2061     // Consecutive entries in fields[] that belong to the same klass are
2062     // archived in the same
2063     // {start_recording_subgraph ... done_recording_subgraph} pass to
2064     // save time.
2065 for (; fields[i].valid(); i++) {
2066 ArchivableStaticFieldInfo* f = &fields[i];
2067 if (f->klass_name != klass_name) {
2068 break;
2069 }
2070
2071 archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2072 f->offset, f->field_name);
2073 }
2074 done_recording_subgraph(info->klass, klass_name);
2075 }
2076
2077 log_info(aot, heap)("Archived subgraph records = %d",
2078 _num_total_subgraph_recordings);
|