/*
 * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(NULL), offset(0), type(T_ILLEGAL) {}

  bool valid() {
    return klass_name != NULL;
  }
};

bool HeapShared::_disable_writing = false;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
GrowableArrayCHeap<Metadata**, mtClassShared>* HeapShared::_native_pointers = NULL;

size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = NULL;
static const char* _test_class_name = NULL;
static const Klass* _test_class = NULL;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = NULL;
#endif


//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  {"java/lang/Long$LongCache", "archivedCache"},
  {"java/lang/Byte$ByteCache", "archivedCache"},
  {"java/lang/Short$ShortCache", "archivedCache"},
  {"java/lang/Character$CharacterCache", "archivedCache"},
  {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale", "constantBaseLocales"},
  {NULL, NULL},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
  {"java/util/ImmutableCollections", "archivedObjects"},
  {"java/lang/ModuleLayer", "EMPTY_LAYER"},
  {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger", "archivedCaches"},
#ifndef PRODUCT
  {NULL, NULL}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {NULL, NULL},
};

// Entry fields for subgraphs archived in the open archive heap region (full module graph).
static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {"jdk/internal/module/ArchivedBootLayer", "archivedBootLayer"},
  {"java/lang/Module$ArchivedData", "archivedData"},
  {NULL, NULL},
};

GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL;
OopHandle HeapShared::_roots;

#ifdef ASSERT
bool HeapShared::is_archived_object_during_dumptime(oop p) {
  assert(HeapShared::can_write(), "must be");
  assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
  return Universe::heap()->is_archived_object(p);
}
#endif

static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
      return true;
    }
  }
  return false;
}

bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  return is_subgraph_root_class_of(closed_archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(open_archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(fmg_open_archive_subgraph_entry_fields, ik);
}

unsigned HeapShared::oop_hash(oop const& p) {
  // Do not call p->identity_hash() as that will update the
  // object header.
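  // (An added note, hedged: hashing the raw oop address should be stable here,
  // since the dump-time tables keyed by this hash are populated and queried
  // while the VM is at a safepoint, so the objects are not expected to move.)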
  return primitive_hash(cast_from_oop<intptr_t>(p));
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != NULL) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != NULL) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)(" calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}

void HeapShared::reset_archived_object_states(TRAPS) {
  assert(DumpSharedSpaces, "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), CHECK);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), CHECK);

  // Clean up jdk.internal.loader.ClassLoaders::bootLoader(), which is not
  // directly used for class loading, but rather is used by the core library
  // to keep track of resources, etc., loaded by the null class loader.
  //
  // Note, this object is non-null, and is not the same as
  // ClassLoaderData::the_null_class_loader_data()->class_loader(),
  // which is null.
  log_debug(cds)("Resetting boot loader");
  JavaValue result(T_OBJECT);
  JavaCalls::call_static(&result,
                         vmClasses::jdk_internal_loader_ClassLoaders_klass(),
                         vmSymbols::bootLoader_name(),
                         vmSymbols::void_BuiltinClassLoader_signature(),
                         CHECK);
  Handle boot_loader(THREAD, result.get_oop());
  reset_states(boot_loader(), CHECK);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
HeapShared::OriginalObjectTable* HeapShared::_original_object_table = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  CachedOopInfo* p = cache->get(obj);
  if (p != NULL) {
    return p->_obj;
  } else {
    return NULL;
  }
}

int HeapShared::append_root(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == NULL) {
    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  }

  return _pending_roots->append(obj);
}

objArrayOop HeapShared::roots() {
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!HeapShared::can_write()) {
      return NULL;
    }
  } else {
    assert(UseSharedSpaces, "must be");
  }

  objArrayOop roots = (objArrayOop)_roots.resolve();
  assert(roots != NULL, "should have been initialized");
  return roots;
}

// Returns the root of the archived objects at the given index (see HeapShared::roots()).
// If clear is true, the root slot is cleared after the read.
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    assert(_pending_roots != NULL, "sanity");
    return _pending_roots->at(index);
  } else {
    assert(UseSharedSpaces, "must be");
    assert(!_roots.is_empty(), "must have loaded shared heap");
    oop result = roots()->obj_at(index);
    if (clear) {
      clear_root(index);
    }
    return result;
  }
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(UseSharedSpaces, "must be");
  if (ArchiveHeapLoader::is_fully_available()) {
    if (log_is_enabled(Debug, cds, heap)) {
      oop old = roots()->obj_at(index);
      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    roots()->obj_at_put(index, NULL);
  }
}

oop HeapShared::archive_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
  if (archived_oop != NULL) {
    count_allocation(len);
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
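    // (An added note on the sequence below: read the identity hash from the
    // original object, possibly installing one as a side effect, then build a
    // clean prototype mark word that keeps only that hash. If the original
    // header is displaced (e.g., locked), the real header -- and with it the
    // narrow klass bits on LP64 -- must be read via the displaced mark.)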
    int hash_original = obj->identity_hash();

    assert(SafepointSynchronize::is_at_safepoint(), "resolving displaced headers only at safepoint");
    markWord mark = obj->mark();
    if (mark.has_displaced_mark_helper()) {
      mark = mark.displaced_mark_helper();
    }
    narrowKlass nklass = mark.narrow_klass();
    archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original) LP64_ONLY(.set_narrow_klass(nklass)));
    assert(archived_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
    assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);

    ArchivedObjectCache* cache = archived_object_cache();
    CachedOopInfo info = make_cached_oop_info(archived_oop);
    cache->put(obj, info);
    if (_original_object_table != NULL) {
      _original_object_table->put(archived_oop, obj);
    }
    mark_native_pointers(obj, archived_oop);
    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
    }
  } else {
    log_error(cds, heap)(
        "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
        p2i(obj));
    log_error(cds)("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
                   SIZE_FORMAT "M", MaxHeapSize/M);
    os::_exit(-1);
  }
  return archived_oop;
}

void HeapShared::archive_klass_objects() {
  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = ArchiveBuilder::get_buffered_klass(klasses->at(i));

    // archive mirror object
    java_lang_Class::archive_mirror(k);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references();
    }
  }
}

void HeapShared::mark_native_pointers(oop orig_obj, oop archived_obj) {
  if (java_lang_Class::is_instance(orig_obj)) {
    mark_one_native_pointer(archived_obj, java_lang_Class::klass_offset());
    mark_one_native_pointer(archived_obj, java_lang_Class::array_klass_offset());
  }
}

void HeapShared::mark_one_native_pointer(oop archived_obj, int offset) {
  Metadata* ptr = archived_obj->metadata_field_acquire(offset);
  if (ptr != NULL) {
    // Set the native pointer to the requested address (at runtime, if the metadata
    // is mapped at the default location, it will be at this address).
    address buffer_addr = ArchiveBuilder::current()->get_buffered_addr((address)ptr);
    address requested_addr = ArchiveBuilder::current()->to_requested(buffer_addr);
    archived_obj->metadata_field_put(offset, (Metadata*)requested_addr);

    // Remember this pointer. At runtime, if the metadata is mapped at a non-default
    // location, the pointer needs to be patched (see ArchiveHeapLoader::patch_native_pointers()).
    _native_pointers->append(archived_obj->field_addr<Metadata*>(offset));

    log_debug(cds, heap, mirror)(
        "Marked metadata field at %d: " PTR_FORMAT " ==> " PTR_FORMAT,
        offset, p2i(ptr), p2i(requested_addr));
  }
}

// -- Handling of Enum objects
// Java Enum classes have synthetic <clinit> methods that look like this
//     enum MyEnum {FOO, BAR}
//     MyEnum::<clinit> {
//         /*static final MyEnum*/ MyEnum::FOO = new MyEnum("FOO");
//         /*static final MyEnum*/ MyEnum::BAR = new MyEnum("BAR");
//     }
//
// If MyEnum::FOO object is referenced by any of the archived subgraphs, we must
// ensure the archived value equals (in object address) to the runtime value of
// MyEnum::FOO.
//
// However, since MyEnum::<clinit> is synthetically generated by javac, there's
// no way of programmatically handling this inside the Java code (as you would handle
// ModuleLayer::EMPTY_LAYER, for example).
//
// Instead, we archive all static fields of such Enum classes. At runtime,
// HeapShared::initialize_enum_klass() will skip the <clinit> method and pull
// the static fields out of the archived heap.
void HeapShared::check_enum_obj(int level,
                                KlassSubGraphInfo* subgraph_info,
                                oop orig_obj,
                                bool is_closed_archive) {
  Klass* k = orig_obj->klass();
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
  if (!k->is_instance_klass()) {
    return;
  }
  InstanceKlass* ik = InstanceKlass::cast(k);
  if (ik->java_super() == vmClasses::Enum_klass() && !ik->has_archived_enum_objs()) {
    ResourceMark rm;
    ik->set_has_archived_enum_objs();
    buffered_k->set_has_archived_enum_objs();
    oop mirror = ik->java_mirror();

    for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
      if (fs.access_flags().is_static()) {
        fieldDescriptor& fd = fs.field_descriptor();
        if (fd.field_type() != T_OBJECT && fd.field_type() != T_ARRAY) {
          guarantee(false, "static field %s::%s must be T_OBJECT or T_ARRAY",
                    ik->external_name(), fd.name()->as_C_string());
        }
        oop oop_field = mirror->obj_field(fd.offset());
        if (oop_field == NULL) {
          guarantee(false, "static field %s::%s must not be null",
                    ik->external_name(), fd.name()->as_C_string());
        } else if (oop_field->klass() != ik && oop_field->klass() != ik->array_klass_or_null()) {
          guarantee(false, "static field %s::%s is of the wrong type",
                    ik->external_name(), fd.name()->as_C_string());
        }
        oop archived_oop_field = archive_reachable_objects_from(level, subgraph_info, oop_field, is_closed_archive);
        int root_index = append_root(archived_oop_field);
        log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT " -> " INTPTR_FORMAT ")",
                            root_index, ik->external_name(), fd.name()->as_C_string(),
                            p2i((oopDesc*)oop_field), p2i((oopDesc*)archived_oop_field));
        SystemDictionaryShared::add_enum_klass_static_field(ik, root_index);
      }
    }
  }
}

// See comments in HeapShared::check_enum_obj()
bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) {
  if (!ArchiveHeapLoader::is_fully_available()) {
    return false;
  }

  RunTimeClassInfo* info = RunTimeClassInfo::get_for(k);
  assert(info != NULL, "sanity");

  if (log_is_enabled(Info, cds, heap)) {
    ResourceMark rm;
    log_info(cds, heap)("Initializing Enum class: %s", k->external_name());
  }

  oop mirror = k->java_mirror();
  int i = 0;
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) {
      int root_index = info->enum_klass_static_field_root_index_at(i++);
      fieldDescriptor& fd = fs.field_descriptor();
      assert(fd.field_type() == T_OBJECT || fd.field_type() == T_ARRAY, "must be");
      mirror->obj_field_put(fd.offset(), get_root(root_index, /*clear=*/true));
    }
  }
  return true;
}

void HeapShared::run_full_gc_in_vm_thread() {
  if (HeapShared::can_write()) {
    // Avoid fragmentation while archiving heap objects.
    // We do this inside a safepoint, so that no further allocation can happen after GC
    // has finished.
    if (GCLocker::is_active()) {
      // Just checking for safety ...
      // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
      // has been modified such that JNI code is executed in some cleanup threads after
      // we have finished class loading.
      log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
    } else {
      log_info(cds)("Run GC ...");
      Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
      log_info(cds)("Run GC done");
    }
  }
}

void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
                                 GrowableArray<MemRegion>* open_regions) {

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache(log_is_enabled(Info, cds, map));

    log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                  UseCompressedOops ? p2i(CompressedOops::begin()) :
                                      p2i((address)G1CollectedHeap::heap()->reserved().start()),
                  UseCompressedOops ? p2i(CompressedOops::end()) :
                                      p2i((address)G1CollectedHeap::heap()->reserved().end()));
    log_info(cds)("Dumping objects to closed archive heap region ...");
    copy_closed_objects(closed_regions);

    log_info(cds)("Dumping objects to open archive heap region ...");
    copy_open_objects(open_regions);

    CDSHeapVerifier::verify();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) {
  assert(HeapShared::can_write(), "must be");

  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive(_dumped_interned_strings);

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */,
                           false /* is_full_module_graph */);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_regions,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) {
  assert(HeapShared::can_write(), "must be");

  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors();

  archive_klass_objects();

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           false /* is_full_module_graph */);
  if (MetaspaceShared::use_full_module_graph()) {
    archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
                             false /* is_closed_archive */,
                             true /* is_full_module_graph */);
    Modules::verify_archived_modules();
  }

  copy_roots();

  G1CollectedHeap::heap()->end_archive_alloc_range(open_regions,
                                                   os::vm_allocation_granularity());
}

// Copy _pending_roots into an objArray
void HeapShared::copy_roots() {
  // HeapShared::roots() points into an ObjArray in the open archive region. A portion of the
  // objects in this array are discovered during HeapShared::archive_objects(). For example,
  // in HeapShared::archive_reachable_objects_from() -> HeapShared::check_enum_obj().
  // However, HeapShared::archive_objects() happens inside a safepoint, so we can't
  // allocate a "regular" ObjArray and pass the result to HeapShared::archive_object().
  // Instead, we have to roll our own alloc/copy routine here.
  int length = _pending_roots != NULL ? _pending_roots->length() : 0;
  size_t size = objArrayOopDesc::object_size(length);
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);

  memset(mem, 0, size * BytesPerWord);
  {
    // This is copied from MemAllocator::finish
    oopDesc::set_mark(mem, k->prototype_header());
#ifndef _LP64
    oopDesc::release_set_klass(mem, k);
#endif
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    roots()->obj_at_put(i, _pending_roots->at(i));
  }
  log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem);
  count_allocation(roots()->size());
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the "buffered"
// address of the class.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
  assert(DumpSharedSpaces, "dump time only");
  bool created;
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
  KlassSubGraphInfo* info =
    _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(buffered_k, is_full_module_graph),
                                                  &created);
  assert(created, "must not initialize twice");
  return info;
}

KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
  assert(info != NULL, "must have been initialized");
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(
    int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new (mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
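// For example (illustrative): archiving the subgraph rooted at
// java/lang/Integer$IntegerCache::archivedCache (an Integer[]) records both
// the Integer[] klass and java/lang/Integer here, so that both can be loaded
// and initialized before the cache is restored at runtime.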
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new (mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_k), "must be a shared class");

  if (_k == buffered_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (buffered_k->is_instance_klass()) {
    assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class(),
           "must be boot class");
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
    check_allowed_klass(InstanceKlass::cast(orig_k));
  } else if (buffered_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(buffered_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
      check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
    }
    if (buffered_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(buffered_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(buffered_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(buffered_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}

void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
  if (ik->module()->name() == vmSymbols::java_base()) {
    assert(ik->package() != NULL, "classes in java.base cannot be in unnamed package");
    return;
  }

#ifndef PRODUCT
  if (!ik->module()->is_named() && ik->package() == NULL) {
    // This class is loaded by ArchiveHeapTestClass
    return;
  }
  const char* extra_msg = ", or in an unnamed package of an unnamed module";
#else
  const char* extra_msg = "";
#endif

  ResourceMark rm;
  log_error(cds, heap)("Class %s not allowed in archive heap. Must be in java.base%s",
                       ik->external_name(), extra_msg);
  os::_exit(1);
}

bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
  if (k->is_objArray_klass()) {
    k = ObjArrayKlass::cast(k)->bottom_klass();
  }
  if (k->is_instance_klass()) {
    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
      ResourceMark rm;
      log_info(cds, heap)("non-early: %s", k->external_name());
      return true;
    } else {
      return false;
    }
  } else {
    return false;
  }
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
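// The record itself is allocated in the RO region of the archive (see
// CopyKlassSubGraphInfoToArchive::do_entry below), so all of its fields must be
// finalized at dump time and are read-only at runtime.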
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;
  _is_full_module_graph = info->is_full_module_graph();

  if (_is_full_module_graph) {
    // Consider all classes referenced by the full module graph as early -- we will be
    // allocating objects of these classes during JVMTI early phase, so they cannot
    // be processed by (non-early) JVMTI ClassFileLoadHook
    _has_non_early_klasses = false;
  } else {
    _has_non_early_klasses = info->has_non_early_klasses();
  }

  if (_has_non_early_klasses) {
    ResourceMark rm;
    log_info(cds, heap)(
        "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
        _k->external_name());
  }

  // populate the entry fields
  GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      ArchiveBuilder::new_ro_array<int>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
            "Archived object klass %s (%2d) => %s",
            _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
    }
  }

  ArchivePtrMarker::mark_pointer(&_k);
  ArchivePtrMarker::mark_pointer(&_entry_field_records);
  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
      u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before the archived
//   java object sub-graphs can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);
  writer.dump(&_run_time_subgraph_info_table, "subgraphs");

#ifndef PRODUCT
  if (ArchiveHeapTestClass != NULL) {
    size_t len = strlen(ArchiveHeapTestClass) + 1;
    Array<char>* array = ArchiveBuilder::new_ro_array<char>((int)len);
    strncpy(array->adr_at(0), ArchiveHeapTestClass, len);
    _archived_ArchiveHeapTestClass = array;
  }
#endif
  if (log_is_enabled(Info, cds, heap)) {
    print_stats();
  }
}

void HeapShared::serialize_root(SerializeClosure* soc) {
  oop roots_oop = NULL;

  if (soc->reading()) {
    soc->do_oop(&roots_oop); // read from archive
    assert(oopDesc::is_oop_or_null(roots_oop), "is oop");
    // Create an OopHandle only if we have actually mapped or loaded the roots
    if (roots_oop != NULL) {
      assert(ArchiveHeapLoader::is_fully_available(), "must be");
      _roots = OopHandle(Universe::vm_global(), roots_oop);
    }
  } else {
    // writing
    roots_oop = roots();
    soc->do_oop(&roots_oop); // write to archive
  }
}

void HeapShared::serialize_tables(SerializeClosure* soc) {

#ifndef PRODUCT
  soc->do_ptr((void**)&_archived_ArchiveHeapTestClass);
  if (soc->reading() && _archived_ArchiveHeapTestClass != NULL) {
    _test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
    setup_test_class(_test_class_name);
  }
#endif

  _run_time_subgraph_info_table.serialize_header(soc);
}

static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 forces a GC to happen at such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC, true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
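//
// (An added note, hedged: for that reason resolve_classes() is expected to run
// early during VM start-up, resolving -- but not initializing -- the classes of
// all archived subgraph objects before the first GC can occur.)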
void HeapShared::resolve_classes(JavaThread* current) {
  assert(UseSharedSpaces, "runtime only!");
  if (!ArchiveHeapLoader::is_fully_available()) {
    return; // nothing to do
  }
  resolve_classes_for_subgraphs(current, closed_archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, open_archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_open_archive_subgraph_entry_fields);
}

void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != NULL && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}

void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == NULL) {
    clear_archived_roots_of(k);
  }
}

void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  if (!ArchiveHeapLoader::is_fully_available()) {
    return; // nothing to do
  }

  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);

  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // None of the field values will be set if there was an exception when initializing the classes.
    // The java code will not see any of the archived objects in the
    // subgraphs referenced from k in this case.
    return;
  }

  if (record != NULL) {
    init_archived_fields_for(k, record);
  }
}

const ArchivedKlassSubGraphInfoRecord*
HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  if (!k->is_shared()) {
    return NULL;
  }
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

#ifndef PRODUCT
  if (_test_class_name != NULL && k->name()->equals(_test_class_name) && record != NULL) {
    _test_class = k;
    _test_class_record = record;
  }
#endif

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
                            k->external_name());
      }
      return NULL;
    }

    if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
                            k->external_name());
      }
      return NULL;
    }

    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
    }

    resolve_or_init(k, do_init, CHECK_NULL);

    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* klass = klasses->at(i);
        if (!klass->is_shared()) {
          return NULL;
        }
        resolve_or_init(klass, do_init, CHECK_NULL);
      }
    }
  }

  return record;
}

void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    if (k->class_loader_data() == NULL) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}

void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
  verify_the_heap(k, "before");

  // Load the subgraph entry fields from the record and store them back to
  // the corresponding fields within the mirror.
  oop m = k->java_mirror();
  Array<int>* entry_field_records = record->entry_field_records();
  if (entry_field_records != NULL) {
    int efr_len = entry_field_records->length();
    assert(efr_len % 2 == 0, "sanity");
    for (int i = 0; i < efr_len; i += 2) {
      int field_offset = entry_field_records->at(i);
      int root_index = entry_field_records->at(i+1);
      oop v = get_root(root_index, /*clear=*/true);
      m->obj_field_put(field_offset, v);
      log_debug(cds, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
    }

    // Done. Java code can see the archived sub-graphs referenced from k's
    // mirror after this point.
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s",
                          k->external_name(), p2i(k),
                          JvmtiExport::is_early_phase() ? " (early)" : "");
" (early)" : ""); 1079 } 1080 } 1081 1082 verify_the_heap(k, "after "); 1083 } 1084 1085 void HeapShared::clear_archived_roots_of(Klass* k) { 1086 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k); 1087 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0); 1088 if (record != NULL) { 1089 Array<int>* entry_field_records = record->entry_field_records(); 1090 if (entry_field_records != NULL) { 1091 int efr_len = entry_field_records->length(); 1092 assert(efr_len % 2 == 0, "sanity"); 1093 for (int i = 0; i < efr_len; i += 2) { 1094 int root_index = entry_field_records->at(i+1); 1095 clear_root(root_index); 1096 } 1097 } 1098 } 1099 } 1100 1101 class WalkOopAndArchiveClosure: public BasicOopIterateClosure { 1102 int _level; 1103 bool _is_closed_archive; 1104 bool _record_klasses_only; 1105 KlassSubGraphInfo* _subgraph_info; 1106 oop _orig_referencing_obj; 1107 oop _archived_referencing_obj; 1108 1109 // The following are for maintaining a stack for determining 1110 // CachedOopInfo::_referrer 1111 static WalkOopAndArchiveClosure* _current; 1112 WalkOopAndArchiveClosure* _last; 1113 public: 1114 WalkOopAndArchiveClosure(int level, 1115 bool is_closed_archive, 1116 bool record_klasses_only, 1117 KlassSubGraphInfo* subgraph_info, 1118 oop orig, oop archived) : 1119 _level(level), _is_closed_archive(is_closed_archive), 1120 _record_klasses_only(record_klasses_only), 1121 _subgraph_info(subgraph_info), 1122 _orig_referencing_obj(orig), _archived_referencing_obj(archived) { 1123 _last = _current; 1124 _current = this; 1125 } 1126 ~WalkOopAndArchiveClosure() { 1127 _current = _last; 1128 } 1129 void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 1130 void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 1131 1132 protected: 1133 template <class T> void do_oop_work(T *p) { 1134 oop obj = RawAccess<>::oop_load(p); 1135 if (!CompressedOops::is_null(obj)) { 1136 assert(!HeapShared::is_archived_object_during_dumptime(obj), 1137 "original objects must not point to archived objects"); 1138 1139 size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char)); 1140 T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta); 1141 1142 if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) { 1143 ResourceMark rm; 1144 log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level, 1145 _orig_referencing_obj->klass()->external_name(), field_delta, 1146 p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name()); 1147 LogTarget(Trace, cds, heap) log; 1148 LogStream out(log); 1149 obj->print_on(&out); 1150 } 1151 1152 oop archived = HeapShared::archive_reachable_objects_from( 1153 _level + 1, _subgraph_info, obj, _is_closed_archive); 1154 assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1"); 1155 assert(HeapShared::is_archived_object_during_dumptime(archived), "must be"); 1156 1157 if (!_record_klasses_only) { 1158 // Update the reference in the archived copy of the referencing object. 
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }

 public:
  static WalkOopAndArchiveClosure* current() { return _current; }
  oop orig_referencing_obj() { return _orig_referencing_obj; }
  KlassSubGraphInfo* subgraph_info() { return _subgraph_info; }
};

WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = NULL;

HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop orig_obj) {
  CachedOopInfo info;
  WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();

  info._subgraph_info = (walker == NULL) ? NULL : walker->subgraph_info();
  info._referrer = (walker == NULL) ? NULL : walker->orig_referencing_obj();
  info._obj = orig_obj;

  return info;
}

void HeapShared::check_closed_region_object(InstanceKlass* k) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && is_reference_type(ft)) {
        ResourceMark rm;
        log_warning(cds, heap)(
            "Please check reference field in %s instance in closed archive heap region: %s %s",
            k->external_name(), (fs.name())->as_C_string(),
            (fs.signature())->as_C_string());
      }
    }
  }
}

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object_during_dumptime(orig_obj), "sanity");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    os::_exit(1);
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    os::_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_object(orig_obj);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
          "Cannot archive the sub-graph referenced from %s object ("
          PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
          orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        os::_exit(1);
      }
    }

    if (java_lang_Module::is_instance(orig_obj)) {
      if (Modules::check_module_oop(orig_obj)) {
        Modules::update_oops_in_archived_module(orig_obj, append_root(archived_obj));
      }
      java_lang_Module::set_module_entry(archived_obj, NULL);
    } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
      // class_data will be restored explicitly at run time.
      guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
                orig_obj == SystemDictionary::java_system_loader() ||
                java_lang_ClassLoader::loader_data(orig_obj) == NULL, "must be");
      java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_region_object(InstanceKlass::cast(orig_k));
  }

  check_enum_obj(level + 1, subgraph_info, orig_obj, is_closed_archive);
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. A mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
//    within a Class instance (java mirror). If the static field is a
//    reference field and points to a non-null java object, proceed to
//    the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
//    current object already exists, updates the pointer in the archived
//    copy of the referencing object to point to the current archived object.
//    Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
//    archive the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
//    point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
//    for loading and initializing before any object in the archived graph can
//    be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
  }
}

#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(is_archived_object_during_dumptime(obj), "must be");
      assert(find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!is_archived_object_during_dumptime(obj), "must be");
      assert(find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_subgraph_info(k, is_full_module_graph);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
                                 _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs += _num_new_walked_objs;
  _num_total_archived_objs += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:
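  // Scans the static fields of _ik for a reference field named _field_name,
  // recording its offset; used by HeapShared::init_subgraph_entry_fields()
  // below to locate each archivable root field.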
void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_subgraph_info(k, is_full_module_graph);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
                                 _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields can never be overloaded");
      if (is_reference_type(fd->field_type())) {
        _found = true;
        _offset = fd->offset();
      }
    }
  }
  bool found()  { return _found; }
  int  offset() { return _offset; }
};
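
// Typical use of the closure above (this is exactly how
// init_subgraph_entry_fields() below locates each entry field):
//
//   ArchivableStaticFieldFinder finder(ik, field_name);
//   ik->do_local_static_fields(&finder);
//   if (finder.found()) {
//     int offset = finder.offset();  // offset of the static field in ik's mirror
//   }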
void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            TRAPS) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);
    ResourceMark rm; // for stringStream::as_string() etc.

#ifndef PRODUCT
    bool is_test_class = (ArchiveHeapTestClass != NULL) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
#else
    bool is_test_class = false;
#endif

    if (is_test_class) {
      log_warning(cds)("Loading ArchiveHeapTestClass %s ...", ArchiveHeapTestClass);
    }

    Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
      stringStream st;
      st.print("Failed to initialize archive heap: %s cannot be loaded by the boot loader", info->klass_name);
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
    }

    if (!k->is_instance_klass()) {
      stringStream st;
      st.print("Failed to initialize archive heap: %s is not an instance class", info->klass_name);
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
    }

    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(), "only boot classes are supported");

    if (is_test_class) {
      if (ik->module()->is_named()) {
        // We don't want ArchiveHeapTestClass to be abused to easily load/initialize arbitrary
        // core-lib classes. You need to at least append to the bootclasspath.
        stringStream st;
        st.print("ArchiveHeapTestClass %s is not in unnamed module", ArchiveHeapTestClass);
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
      }

      if (ik->package() != NULL) {
        // This restriction makes HeapShared::is_a_test_class_in_unnamed_module() easy.
        stringStream st;
        st.print("ArchiveHeapTestClass %s is not in unnamed package", ArchiveHeapTestClass);
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
      }
    } else {
      if (ik->module()->name() != vmSymbols::java_base()) {
        // We don't want to deal with cases when a module is unavailable at runtime.
        // FUTURE -- load from archived heap only when module graph has not changed
        //           between dump and runtime.
        stringStream st;
        st.print("%s is not in java.base module", info->klass_name);
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
      }
    }

    if (is_test_class) {
      log_warning(cds)("Initializing ArchiveHeapTestClass %s ...", ArchiveHeapTestClass);
    }
    ik->initialize(CHECK);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    if (!finder.found()) {
      stringStream st;
      st.print("Unable to find the static T_OBJECT field %s::%s", info->klass_name, info->field_name);
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
    }

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(TRAPS) {
  assert(HeapShared::can_write(), "must be");
  _dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields, CHECK);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields, CHECK);
  if (MetaspaceShared::use_full_module_graph()) {
    init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields, CHECK);
  }
}

#ifndef PRODUCT
void HeapShared::setup_test_class(const char* test_class_name) {
  ArchivableStaticFieldInfo* p = open_archive_subgraph_entry_fields;
  int num_slots = sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
  assert(p[num_slots - 2].klass_name == NULL, "must have empty slot that's patched below");
  assert(p[num_slots - 1].klass_name == NULL, "must have empty slot that marks the end of the list");

  if (test_class_name != NULL) {
    p[num_slots - 2].klass_name = test_class_name;
    p[num_slots - 2].field_name = ARCHIVE_TEST_FIELD_NAME;
  }
}
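
// For example (hypothetical class name): running -Xshare:dump with
// -XX:ArchiveHeapTestClass=MyHeapTest patches the second-to-last slot to
// {"MyHeapTest", ARCHIVE_TEST_FIELD_NAME}, so MyHeapTest's static field is
// processed as an extra subgraph entry field during the dump.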
// See if ik is one of the test classes that are pulled in by -XX:ArchiveHeapTestClass
// at runtime. This may be called before the module system is initialized, so
// we cannot rely on InstanceKlass::module(), etc.
bool HeapShared::is_a_test_class_in_unnamed_module(Klass* ik) {
  if (_test_class != NULL) {
    if (ik == _test_class) {
      return true;
    }
    Array<Klass*>* klasses = _test_class_record->subgraph_object_klasses();
    if (klasses == NULL) {
      return false;
    }

    for (int i = 0; i < klasses->length(); i++) {
      Klass* k = klasses->at(i);
      if (k == ik) {
        Symbol* name;
        if (k->is_instance_klass()) {
          name = InstanceKlass::cast(k)->name();
        } else if (k->is_objArray_klass()) {
          Klass* bk = ObjArrayKlass::cast(k)->bottom_klass();
          if (!bk->is_instance_klass()) {
            return false;
          }
          name = bk->name();
        } else {
          return false;
        }

        // See KlassSubGraphInfo::check_allowed_klass() - only two types of
        // classes are allowed:
        //   (A) java.base classes (which must not be in the unnamed module)
        //   (B) test classes which must be in the unnamed package of the unnamed module.
        // So if we see a '/' character in the class name, it must be in (A);
        // otherwise it must be in (B).
        if (name->index_of_at(0, "/", 1) >= 0) {
          return false; // (A)
        }

        return true; // (B)
      }
    }
  }

  return false;
}
#endif

void HeapShared::init_for_dumping(TRAPS) {
  if (HeapShared::can_write()) {
    setup_test_class(ArchiveHeapTestClass);
    _dumped_interned_strings = new (mtClass)DumpedInternedStrings();
    _native_pointers = new GrowableArrayCHeap<Metadata**, mtClassShared>(2048);
    init_subgraph_entry_fields(CHECK);
  }
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_closed_archive,
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field.
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name,
                                                  is_closed_archive);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
                      is_closed_archive ? "closed" : "open",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

// Not all the strings in the global StringTable are dumped into the archive, because
// some of those strings may be only referenced by classes that are excluded from
// the archive. We need to explicitly mark the strings that are:
//   [1] used by classes that WILL be archived;
//   [2] included in the SharedArchiveConfigFile.
void HeapShared::add_to_dumped_interned_strings(oop string) {
  assert_at_safepoint(); // DumpedInternedStrings uses raw oops
  bool created;
  _dumped_interned_strings->put_if_absent(string, true, &created);
}

// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
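//
// Sketch of the bitmap encoding (assuming UseCompressedOops): bit idx in the
// oopmap is set iff the slot ((narrowOop*)_start)[idx] holds a non-null
// reference, i.e.
//
//   idx = p - (narrowOop*)_start;   // see do_oop(narrowOop*) below
//
// so at runtime only the set bits need to be visited when patching the
// embedded pointers.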
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  void* _start;
  BitMap* _oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(void* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual void do_oop(narrowOop* p) {
    assert(UseCompressedOops, "sanity");
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      // Note: HeapShared::to_requested_address() is not necessary because
      // the heap always starts at a deterministic address with UseCompressedOops==true.
      size_t idx = p - (narrowOop*)_start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop* p) {
    assert(!UseCompressedOops, "sanity");
    _num_total_oops ++;
    if ((*p) != NULL) {
      size_t idx = p - (oop*)_start;
      _oopmap->set_bit(idx);
      if (DumpSharedSpaces) {
        // Make heap content deterministic.
        *p = HeapShared::to_requested_address(*p);
      }
    } else {
      _num_null_oops ++;
    }
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};


address HeapShared::to_requested_address(address dumptime_addr) {
  assert(DumpSharedSpaces, "static dump time only");
  if (dumptime_addr == NULL || UseCompressedOops) {
    return dumptime_addr;
  }

  // With UseCompressedOops==false, actual_base is selected by the OS so
  // it's different across -Xshare:dump runs.
  address actual_base = (address)G1CollectedHeap::heap()->reserved().start();
  address actual_end  = (address)G1CollectedHeap::heap()->reserved().end();
  assert(actual_base <= dumptime_addr && dumptime_addr <= actual_end, "must be an address in the heap");

  // We always write the objects as if the heap started at this address. This
  // makes the heap content deterministic.
  //
  // Note that at runtime, the heap address is also selected by the OS, so
  // the archive heap will not be mapped at 0x10000000. Instead, we will call
  // HeapShared::patch_embedded_pointers() to relocate the heap contents
  // accordingly.
  const address REQUESTED_BASE = (address)0x10000000;
  intx delta = REQUESTED_BASE - actual_base;

  address requested_addr = dumptime_addr + delta;
  assert(REQUESTED_BASE != 0 && requested_addr != NULL, "sanity");
  return requested_addr;
}
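
// Worked example (hypothetical addresses): if the OS placed the heap at
// actual_base == 0x00007f0000000000, then delta == 0x10000000 - 0x00007f0000000000,
// and an object at actual_base + 0x2468 is written out as if it lived at
// 0x10000000 + 0x2468 == 0x10002468. Every -Xshare:dump run therefore
// produces identical archived heap contents, regardless of where the OS
// happened to reserve the heap.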
ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  size_t num_bits = region.byte_size() / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  ResourceBitMap oopmap(num_bits);

  HeapWord* p   = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((void*)p, &oopmap);
  ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL;

  int num_objs = 0;
  while (p < end) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&finder);
    p += o->size();
    if (DumpSharedSpaces) {
      builder->relocate_klass_ptr_of_oop(o);
    }
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, oop fields = %7d (nulls = %7d)",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}


ResourceBitMap HeapShared::calculate_ptrmap(MemRegion region) {
  size_t num_bits = region.byte_size() / sizeof(Metadata*);
  ResourceBitMap oopmap(num_bits);

  Metadata** start = (Metadata**)region.start();
  Metadata** end   = (Metadata**)region.end();

  int num_non_null_ptrs = 0;
  int len = _native_pointers->length();
  for (int i = 0; i < len; i++) {
    Metadata** p = _native_pointers->at(i);
    if (start <= p && p < end) {
      assert(*p != NULL, "must be non-null");
      num_non_null_ptrs ++;
      size_t idx = p - start;
      oopmap.set_bit(idx);
    }
  }

  log_info(cds, heap)("calculate_ptrmap: marked %d non-null native pointers out of "
                      SIZE_FORMAT " possible locations", num_non_null_ptrs, num_bits);
  if (num_non_null_ptrs > 0) {
    return oopmap;
  } else {
    return ResourceBitMap(0);
  }
}

// Bucket object sizes (in heap words) by powers of two: an object of `size`
// words is counted in the smallest bucket i with size <= (1 << i). For
// example, a 3-word object lands in bucket i == 2, which print_stats()
// reports as "<= 32 bytes" when HeapWordSize == 8. Objects larger than the
// last bucket fall through and are counted only in the totals; print_stats()
// recovers them by subtraction as the "huge" objects.
void HeapShared::count_allocation(size_t size) {
  _total_obj_count ++;
  _total_obj_size += size;
  for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
    if (size <= (size_t(1) << i)) {
      _alloc_count[i] ++;
      _alloc_size[i] += size;
      return;
    }
  }
}

static double avg_size(size_t size, size_t count) {
  double avg = 0;
  if (count > 0) {
    avg = double(size * HeapWordSize) / double(count);
  }
  return avg;
}

void HeapShared::print_stats() {
  size_t huge_count = _total_obj_count;
  size_t huge_size  = _total_obj_size;

  for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
    size_t byte_size_limit = (size_t(1) << i) * HeapWordSize;
    size_t count = _alloc_count[i];
    size_t size  = _alloc_size[i];
    log_info(cds, heap)(SIZE_FORMAT_W(8) " objects are <= " SIZE_FORMAT_W(-6)
                        " bytes (total " SIZE_FORMAT_W(8) " bytes, avg %8.1f bytes)",
                        count, byte_size_limit, size * HeapWordSize, avg_size(size, count));
    huge_count -= count;
    huge_size  -= size;
  }

  log_info(cds, heap)(SIZE_FORMAT_W(8) " huge objects (total " SIZE_FORMAT_W(8) " bytes"
                      ", avg %8.1f bytes)",
                      huge_count, huge_size * HeapWordSize,
                      avg_size(huge_size, huge_count));
  log_info(cds, heap)(SIZE_FORMAT_W(8) " total objects (total " SIZE_FORMAT_W(8) " bytes"
                      ", avg %8.1f bytes)",
                      _total_obj_count, _total_obj_size * HeapWordSize,
                      avg_size(_total_obj_size, _total_obj_count));
}

#endif // INCLUDE_CDS_JAVA_HEAP