
src/hotspot/share/cds/heapShared.cpp


 790     assert(success, "sanity");
 791   }
 792 
 793   if (log_is_enabled(Debug, aot, init)) {
 794     ResourceMark rm;
 795     log_debug(aot, init)("copied %3d field(s) in aot-initialized mirror %s%s%s", nfields, ik->external_name(),
 796                          ik->is_hidden() ? " (hidden)" : "",
 797                          ik->is_enum_subclass() ? " (enum)" : "");
 798   }
 799 }
 800 
 801 void HeapShared::copy_java_mirror(oop orig_mirror, oop scratch_m) {
 802   // We need to retain the identity_hash, because it may have been used by some hashtables
 803   // in the shared heap.
 804   if (!orig_mirror->fast_no_hash_check()) {
 805     intptr_t src_hash = orig_mirror->identity_hash();
 806     if (UseCompactObjectHeaders) {
 807       narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
 808       scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
 809     } else {

 810       scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
 811     }
 812     assert(scratch_m->mark().is_unlocked(), "sanity");
 813 
 814     DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
 815     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
 816   }
 817 



















 818   if (CDSConfig::is_dumping_aot_linked_classes()) {
 819     java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
 820     java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
 821   }
 822 }
 823 
 824 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
 825   if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
 826     objArrayOop rr = src_ik->constants()->resolved_references_or_null();
 827     if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
 828       return HeapShared::scratch_resolved_references(src_ik->constants());
 829     }
 830   }
 831   return nullptr;
 832 }
 833 
 834 void HeapShared::archive_strings() {
 835   assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
 836   oop shared_strings_array = StringTable::init_shared_strings_array();
 837   bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);

1497   return record;
1498 }
1499 
1500 void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
1501   TempNewSymbol klass_name_sym = SymbolTable::new_symbol(klass_name);
1502   InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
1503   if (k == nullptr) {
1504     return;
1505   }
1506   assert(k->defined_by_boot_loader(), "sanity");
1507   resolve_or_init(k, false, CHECK);
1508   if (do_init) {
1509     resolve_or_init(k, true, CHECK);
1510   }
1511 }
1512 
1513 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
1514   if (!do_init) {
1515     if (k->class_loader_data() == nullptr) {
1516       Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
1517       assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");




1518     }
1519   } else {
1520     assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
1521     if (k->is_instance_klass()) {
1522       InstanceKlass* ik = InstanceKlass::cast(k);
1523       ik->initialize(CHECK);
1524     } else if (k->is_objArray_klass()) {
1525       ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1526       oak->initialize(CHECK);
1527     }
1528   }
1529 }
1530 
1531 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1532   verify_the_heap(k, "before");
1533 
1534   Array<int>* entry_field_records = record->entry_field_records();
1535   if (entry_field_records != nullptr) {
1536     int efr_len = entry_field_records->length();
1537     assert(efr_len % 2 == 0, "sanity");

1959   }
1960 }
1961 #endif
1962 
1963 void HeapShared::check_special_subgraph_classes() {
1964   if (CDSConfig::is_dumping_aot_linked_classes()) {
1965     // We can have aot-initialized classes (such as Enums) that can reference objects
1966     // of arbitrary types. Currently, we trust the JEP 483 implementation to only
1967     // aot-initialize classes that are "safe".
1968     //
1969     // TODO: we need an automatic tool that checks the safety of aot-initialized
1970     // classes (when we extend the set of aot-initialized classes beyond JEP 483)
1971     return;
1972   } else {
1973     // In this case, the special subgraph should contain a few specific types
1974     GrowableArray<Klass*>* klasses = _dump_time_special_subgraph->subgraph_object_klasses();
1975     int num = klasses->length();
1976     for (int i = 0; i < num; i++) {
1977       Klass* subgraph_k = klasses->at(i);
1978       Symbol* name = subgraph_k->name();
1979       if (subgraph_k->is_instance_klass() &&

1980           name != vmSymbols::java_lang_Class() &&
1981           name != vmSymbols::java_lang_String() &&
1982           name != vmSymbols::java_lang_ArithmeticException() &&
1983           name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
1984           name != vmSymbols::java_lang_ArrayStoreException() &&
1985           name != vmSymbols::java_lang_ClassCastException() &&
1986           name != vmSymbols::java_lang_InternalError() &&
1987           name != vmSymbols::java_lang_NullPointerException() &&
1988           name != vmSymbols::jdk_internal_vm_PreemptedException()) {
1989         ResourceMark rm;
1990         fatal("special subgraph cannot have objects of type %s", subgraph_k->external_name());
1991       }
1992     }
1993   }
1994 }
1995 
1996 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
1997 HeapShared::PendingOop HeapShared::_object_being_archived;
1998 size_t HeapShared::_num_new_walked_objs;
1999 size_t HeapShared::_num_new_archived_objs;

2251   }
2252 }
2253 
2254 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2255                                           bool is_full_module_graph) {
2256   _num_total_subgraph_recordings = 0;
2257   _num_total_walked_objs = 0;
2258   _num_total_archived_objs = 0;
2259   _num_total_recorded_klasses = 0;
2260   _num_total_verifications = 0;
2261 
2262   // For each class X that has one or more archived fields:
2263   // [1] Dump the subgraph of each archived field
2264   // [2] Create a list of all the classes of the objects that can be reached
2265   //     by any of these static fields.
2266   //     At runtime, these classes are initialized before X's archived fields
2267   //     are restored by HeapShared::initialize_from_archived_subgraph().
2268   for (int i = 0; fields[i].valid(); ) {
2269     ArchivableStaticFieldInfo* info = &fields[i];
2270     const char* klass_name = info->klass_name;

2271     start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2272 
2273     // If you have specified consecutive fields of the same klass in
2274     // fields[], these will be archived in the same
2275     // {start_recording_subgraph ... done_recording_subgraph} pass to
2276     // save time.
2277     for (; fields[i].valid(); i++) {
2278       ArchivableStaticFieldInfo* f = &fields[i];
2279       if (f->klass_name != klass_name) {
2280         break;
2281       }
2282 
2283       archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2284                                                   f->offset, f->field_name);
2285     }
2286     done_recording_subgraph(info->klass, klass_name);
2287   }
2288 
2289   log_info(aot, heap)("Archived subgraph records = %zu",
2290                       _num_total_subgraph_recordings);

 790     assert(success, "sanity");
 791   }
 792 
 793   if (log_is_enabled(Debug, aot, init)) {
 794     ResourceMark rm;
 795     log_debug(aot, init)("copied %3d field(s) in aot-initialized mirror %s%s%s", nfields, ik->external_name(),
 796                          ik->is_hidden() ? " (hidden)" : "",
 797                          ik->is_enum_subclass() ? " (enum)" : "");
 798   }
 799 }
 800 
 801 void HeapShared::copy_java_mirror(oop orig_mirror, oop scratch_m) {
 802   // We need to retain the identity_hash, because it may have been used by some hashtables
 803   // in the shared heap.
 804   if (!orig_mirror->fast_no_hash_check()) {
 805     intptr_t src_hash = orig_mirror->identity_hash();
 806     if (UseCompactObjectHeaders) {
 807       narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
 808       scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
 809     } else {
 810       // For Valhalla, the prototype header is the same as markWord::prototype().
 811       scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
 812     }
 813     assert(scratch_m->mark().is_unlocked(), "sanity");
 814 
 815     DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
 816     assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
 817   }
 818 
 819   Klass* k = java_lang_Class::as_Klass(orig_mirror);
 820   if (k != nullptr && k->is_instance_klass()) {
 821     InstanceKlass* ik = InstanceKlass::cast(k);
 822 
 823     if (ik->is_inline_klass() && ik->is_initialized()) {
 824       // Only concrete value classes need the null_reset field
 825       InlineKlass* ilk = InlineKlass::cast(k);
 826       if (ilk->supports_nullable_layouts()) {
 827         scratch_m->obj_field_put(ilk->null_reset_value_offset(), ilk->null_reset_value());
 828       }
 829     }
 830 
 831     if (ik->has_acmp_maps_offset()) {
 832       int maps_offset = ik->acmp_maps_offset();
 833       oop maps = orig_mirror->obj_field(maps_offset);
 834       scratch_m->obj_field_put(maps_offset, maps);
 835     }
 836   }
 837 
 838   if (CDSConfig::is_dumping_aot_linked_classes()) {
 839     java_lang_Class::set_module(scratch_m, java_lang_Class::module(orig_mirror));
 840     java_lang_Class::set_protection_domain(scratch_m, java_lang_Class::protection_domain(orig_mirror));
 841   }
 842 }
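
The hash handling in copy_java_mirror() matters because an archived mirror may already be a key in hashtables whose buckets were computed from its identity_hash(); the scratch copy therefore has to report the same hash as the original. Below is a minimal standalone sketch of that idea, using simplified, made-up types (ToyMark, ToyObject) rather than HotSpot's real markWord and oop:

#include <cassert>
#include <cstdint>

// Toy stand-in for a header word that keeps an identity hash in its low bits.
// The real markWord layout is richer; this only models the part that the
// mirror-copying code preserves when it builds the scratch copy.
struct ToyMark {
  uint64_t bits = 0;

  static ToyMark prototype() { return ToyMark(); }

  ToyMark copy_set_hash(uint64_t h) const {
    ToyMark m;
    m.bits = (bits & ~uint64_t(0xFFFFFF)) | (h & 0xFFFFFF);
    return m;
  }

  uint64_t hash() const { return bits & 0xFFFFFF; }
};

struct ToyObject {
  ToyMark mark;
  uint64_t identity_hash() const { return mark.hash(); }
};

int main() {
  // The original object handed out a hash earlier (e.g. when it became a
  // hashtable key), so that hash is baked into its header.
  ToyObject orig;
  orig.mark = ToyMark::prototype().copy_set_hash(0xABCDE);

  // The scratch (to-be-archived) copy starts from a clean prototype header
  // but carries over the original hash, so lookups keyed on identity_hash()
  // still find the object after the archive is mapped.
  ToyObject scratch;
  scratch.mark = ToyMark::prototype().copy_set_hash(orig.identity_hash());

  assert(orig.identity_hash() == scratch.identity_hash());
  return 0;
}
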
 843 
 844 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
 845   if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
 846     objArrayOop rr = src_ik->constants()->resolved_references_or_null();
 847     if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
 848       return HeapShared::scratch_resolved_references(src_ik->constants());
 849     }
 850   }
 851   return nullptr;
 852 }
 853 
 854 void HeapShared::archive_strings() {
 855   assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
 856   oop shared_strings_array = StringTable::init_shared_strings_array();
 857   bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);

1517   return record;
1518 }
1519 
1520 void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
1521   TempNewSymbol klass_name_sym = SymbolTable::new_symbol(klass_name);
1522   InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
1523   if (k == nullptr) {
1524     return;
1525   }
1526   assert(k->defined_by_boot_loader(), "sanity");
1527   resolve_or_init(k, false, CHECK);
1528   if (do_init) {
1529     resolve_or_init(k, true, CHECK);
1530   }
1531 }
1532 
1533 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
1534   if (!do_init) {
1535     if (k->class_loader_data() == nullptr) {
1536       Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
1537       if (resolved_k->is_array_klass()) {
1538         assert(resolved_k == k || resolved_k == k->super(), "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
1539       } else {
1540         assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
1541       }
1542     }
1543   } else {
1544     assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
1545     if (k->is_instance_klass()) {
1546       InstanceKlass* ik = InstanceKlass::cast(k);
1547       ik->initialize(CHECK);
1548     } else if (k->is_objArray_klass()) {
1549       ObjArrayKlass* oak = ObjArrayKlass::cast(k);
1550       oak->initialize(CHECK);
1551     }
1552   }
1553 }
1554 
1555 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
1556   verify_the_heap(k, "before");
1557 
1558   Array<int>* entry_field_records = record->entry_field_records();
1559   if (entry_field_records != nullptr) {
1560     int efr_len = entry_field_records->length();
1561     assert(efr_len % 2 == 0, "sanity");

1983   }
1984 }
1985 #endif
1986 
1987 void HeapShared::check_special_subgraph_classes() {
1988   if (CDSConfig::is_dumping_aot_linked_classes()) {
1989     // We can have aot-initialized classes (such as Enums) that can reference objects
1990     // of arbitrary types. Currently, we trust the JEP 483 implementation to only
1991     // aot-initialize classes that are "safe".
1992     //
1993     // TODO: we need an automatic tool that checks the safety of aot-initialized
1994     // classes (when we extend the set of aot-initialized classes beyond JEP 483)
1995     return;
1996   } else {
1997     // In this case, the special subgraph should contain a few specific types
1998     GrowableArray<Klass*>* klasses = _dump_time_special_subgraph->subgraph_object_klasses();
1999     int num = klasses->length();
2000     for (int i = 0; i < num; i++) {
2001       Klass* subgraph_k = klasses->at(i);
2002       Symbol* name = subgraph_k->name();
2003 
2004       if (subgraph_k->is_identity_class() &&
2005           name != vmSymbols::java_lang_Class() &&
2006           name != vmSymbols::java_lang_String() &&
2007           name != vmSymbols::java_lang_ArithmeticException() &&
2008           name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
2009           name != vmSymbols::java_lang_ArrayStoreException() &&
2010           name != vmSymbols::java_lang_ClassCastException() &&
2011           name != vmSymbols::java_lang_InternalError() &&
2012           name != vmSymbols::java_lang_NullPointerException() &&
2013           name != vmSymbols::jdk_internal_vm_PreemptedException()) {
2014         ResourceMark rm;
2015         fatal("special subgraph cannot have objects of type %s", subgraph_k->external_name());
2016       }
2017     }
2018   }
2019 }
2020 
2021 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
2022 HeapShared::PendingOop HeapShared::_object_being_archived;
2023 size_t HeapShared::_num_new_walked_objs;
2024 size_t HeapShared::_num_new_archived_objs;

2276   }
2277 }
2278 
2279 void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
2280                                           bool is_full_module_graph) {
2281   _num_total_subgraph_recordings = 0;
2282   _num_total_walked_objs = 0;
2283   _num_total_archived_objs = 0;
2284   _num_total_recorded_klasses = 0;
2285   _num_total_verifications = 0;
2286 
2287   // For each class X that has one or more archived fields:
2288   // [1] Dump the subgraph of each archived field
2289   // [2] Create a list of all the classes of the objects that can be reached
2290   //     by any of these static fields.
2291   //     At runtime, these classes are initialized before X's archived fields
2292   //     are restored by HeapShared::initialize_from_archived_subgraph().
2293   for (int i = 0; fields[i].valid(); ) {
2294     ArchivableStaticFieldInfo* info = &fields[i];
2295     const char* klass_name = info->klass_name;
2296 
2297     start_recording_subgraph(info->klass, klass_name, is_full_module_graph);
2298 
2299     // If you have specified consecutive fields of the same klass in
2300     // fields[], these will be archived in the same
2301     // {start_recording_subgraph ... done_recording_subgraph} pass to
2302     // save time.
2303     for (; fields[i].valid(); i++) {
2304       ArchivableStaticFieldInfo* f = &fields[i];
2305       if (f->klass_name != klass_name) {
2306         break;
2307       }
2308 
2309       archive_reachable_objects_from_static_field(f->klass, f->klass_name,
2310                                                   f->offset, f->field_name);
2311     }
2312     done_recording_subgraph(info->klass, klass_name);
2313   }
2314 
2315   log_info(aot, heap)("Archived subgraph records = %zu",
2316                       _num_total_subgraph_recordings);