/*
 * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(NULL), offset(0), type(T_ILLEGAL) {}

  bool valid() {
    return klass_name != NULL;
  }
};

bool HeapShared::_disable_writing = false;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
GrowableArrayCHeap<Metadata**, mtClassShared>* HeapShared::_native_pointers = NULL;

size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = NULL;
static const char*
_test_class_name = NULL;
static const Klass* _test_class = NULL;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = NULL;
#endif


//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/Long$LongCache",                 "archivedCache"},
  {"java/lang/Byte$ByteCache",                 "archivedCache"},
  {"java/lang/Short$ShortCache",               "archivedCache"},
  {"java/lang/Character$CharacterCache",       "archivedCache"},
  {"java/util/jar/Attributes$Name",            "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",               "constantBaseLocales"},
  {NULL, NULL},
};

// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleGraph"},
  {"java/util/ImmutableCollections",           "archivedObjects"},
  {"java/lang/ModuleLayer",                    "EMPTY_LAYER"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",           "archivedCaches"},
#ifndef PRODUCT
  {NULL, NULL}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {NULL, NULL},
};

// Entry fields for subgraphs archived in the open archive heap region (full module graph).
static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {"jdk/internal/module/ArchivedBootLayer",    "archivedBootLayer"},
  {"java/lang/Module$ArchivedData",            "archivedData"},
  {NULL, NULL},
};

GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL;
OopHandle HeapShared::_roots;

#ifdef ASSERT
bool HeapShared::is_archived_object_during_dumptime(oop p) {
  assert(HeapShared::can_write(), "must be");
  assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
  return Universe::heap()->is_archived_object(p);
}
#endif

static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
      return true;
    }
  }
  return false;
}

bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  return is_subgraph_root_class_of(closed_archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(open_archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(fmg_open_archive_subgraph_entry_fields, ik);
}

unsigned HeapShared::oop_hash(oop const& p) {
  // Do not call p->identity_hash() as that will update the
  // object header.
  return primitive_hash(cast_from_oop<intptr_t>(p));
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != NULL) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != NULL) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)(" calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}

void HeapShared::reset_archived_object_states(TRAPS) {
  assert(DumpSharedSpaces, "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), CHECK);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), CHECK);

  // Clean up jdk.internal.loader.ClassLoaders::bootLoader(), which is not
  // directly used for class loading, but rather is used by the core library
  // to keep track of resources, etc, loaded by the null class loader.
  //
  // Note, this object is non-null, and is not the same as
  // ClassLoaderData::the_null_class_loader_data()->class_loader(),
  // which is null.
  log_debug(cds)("Resetting boot loader");
  JavaValue result(T_OBJECT);
  JavaCalls::call_static(&result,
                         vmClasses::jdk_internal_loader_ClassLoaders_klass(),
                         vmSymbols::bootLoader_name(),
                         vmSymbols::void_BuiltinClassLoader_signature(),
                         CHECK);
  Handle boot_loader(THREAD, result.get_oop());
  reset_states(boot_loader(), CHECK);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
HeapShared::OriginalObjectTable* HeapShared::_original_object_table = NULL;

oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  CachedOopInfo* p = cache->get(obj);
  if (p != NULL) {
    return p->_obj;
  } else {
    return NULL;
  }
}

int HeapShared::append_root(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == NULL) {
    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  }

  return _pending_roots->append(obj);
}

// Returns an objArray that contains all the roots of the archived objects
objArrayOop HeapShared::roots() {
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!HeapShared::can_write()) {
      return NULL;
    }
  } else {
    assert(UseSharedSpaces, "must be");
  }

  objArrayOop roots = (objArrayOop)_roots.resolve();
  assert(roots != NULL, "should have been initialized");
  return roots;
}

oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    assert(_pending_roots != NULL, "sanity");
    return _pending_roots->at(index);
  } else {
    assert(UseSharedSpaces, "must be");
    assert(!_roots.is_empty(), "must have loaded shared heap");
    oop result = roots()->obj_at(index);
    if (clear) {
      clear_root(index);
    }
    return result;
  }
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(UseSharedSpaces, "must be");
  if (ArchiveHeapLoader::is_fully_available()) {
    if (log_is_enabled(Debug, cds, heap)) {
      oop old = roots()->obj_at(index);
      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    roots()->obj_at_put(index, NULL);
  }
}

oop HeapShared::archive_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
  if (archived_oop != NULL) {
    count_allocation(len);
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
    int hash_original = obj->identity_hash();
    archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
    assert(archived_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
    assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);

    ArchivedObjectCache* cache = archived_object_cache();
    CachedOopInfo info = make_cached_oop_info(archived_oop);
    cache->put(obj, info);
    if (_original_object_table != NULL) {
      _original_object_table->put(archived_oop, obj);
    }
    mark_native_pointers(obj, archived_oop);
    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
    }
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    log_error(cds)("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
                   SIZE_FORMAT "M", MaxHeapSize/M);
    os::_exit(-1);
  }
  return archived_oop;
}

void HeapShared::archive_klass_objects() {
  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = ArchiveBuilder::get_buffered_klass(klasses->at(i));

    // archive mirror object
    java_lang_Class::archive_mirror(k);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references();
    }
  }
}

void HeapShared::mark_native_pointers(oop orig_obj, oop archived_obj) {
  if (java_lang_Class::is_instance(orig_obj)) {
    mark_one_native_pointer(archived_obj, java_lang_Class::klass_offset());
    mark_one_native_pointer(archived_obj, java_lang_Class::array_klass_offset());
  }
}

void HeapShared::mark_one_native_pointer(oop archived_obj, int offset) {
  Metadata* ptr = archived_obj->metadata_field_acquire(offset);
  if (ptr != NULL) {
    // Set the native pointer to the requested address (at runtime, if the metadata
    // is mapped at the default location, it will be at this address).
    address buffer_addr = ArchiveBuilder::current()->get_buffered_addr((address)ptr);
    address requested_addr = ArchiveBuilder::current()->to_requested(buffer_addr);
    archived_obj->metadata_field_put(offset, (Metadata*)requested_addr);

    // Remember this pointer. At runtime, if the metadata is mapped at a non-default
    // location, the pointer needs to be patched (see ArchiveHeapLoader::patch_native_pointers()).
    _native_pointers->append(archived_obj->field_addr<Metadata*>(offset));

    log_debug(cds, heap, mirror)(
        "Marked metadata field at %d: " PTR_FORMAT " ==> " PTR_FORMAT,
        offset, p2i(ptr), p2i(requested_addr));
  }
}

// -- Handling of Enum objects
// Java Enum classes have synthetic <clinit> methods that look like this
//     enum MyEnum {FOO, BAR}
//     MyEnum::<clinit> {
//        /*static final MyEnum*/ MyEnum::FOO = new MyEnum("FOO");
//        /*static final MyEnum*/ MyEnum::BAR = new MyEnum("BAR");
//     }
//
// If the MyEnum::FOO object is referenced by any of the archived subgraphs, we must
// ensure the archived value is identical (in object address) to the runtime value of
// MyEnum::FOO.
//
// However, since MyEnum::<clinit> is synthetically generated by javac, there's
// no way of programmatically handling this inside the Java code (as you would handle
// ModuleLayer::EMPTY_LAYER, for example).
//
// Instead, we archive all static fields of such Enum classes. At runtime,
// HeapShared::initialize_enum_klass() will skip the <clinit> method and pull
// the static fields out of the archived heap.
void HeapShared::check_enum_obj(int level,
                                KlassSubGraphInfo* subgraph_info,
                                oop orig_obj,
                                bool is_closed_archive) {
  Klass* k = orig_obj->klass();
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
  if (!k->is_instance_klass()) {
    return;
  }
  InstanceKlass* ik = InstanceKlass::cast(k);
  if (ik->java_super() == vmClasses::Enum_klass() && !ik->has_archived_enum_objs()) {
    ResourceMark rm;
    ik->set_has_archived_enum_objs();
    buffered_k->set_has_archived_enum_objs();
    oop mirror = ik->java_mirror();

    for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
      if (fs.access_flags().is_static()) {
        fieldDescriptor& fd = fs.field_descriptor();
        if (fd.field_type() != T_OBJECT && fd.field_type() != T_ARRAY) {
          guarantee(false, "static field %s::%s must be T_OBJECT or T_ARRAY",
                    ik->external_name(), fd.name()->as_C_string());
        }
        oop oop_field = mirror->obj_field(fd.offset());
        if (oop_field == NULL) {
          guarantee(false, "static field %s::%s must not be null",
                    ik->external_name(), fd.name()->as_C_string());
        } else if (oop_field->klass() != ik && oop_field->klass() != ik->array_klass_or_null()) {
          guarantee(false, "static field %s::%s is of the wrong type",
                    ik->external_name(), fd.name()->as_C_string());
        }
        oop archived_oop_field = archive_reachable_objects_from(level, subgraph_info, oop_field, is_closed_archive);
        int root_index = append_root(archived_oop_field);
        log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT " -> " INTPTR_FORMAT ")",
                            root_index, ik->external_name(), fd.name()->as_C_string(),
                            p2i((oopDesc*)oop_field), p2i((oopDesc*)archived_oop_field));
        SystemDictionaryShared::add_enum_klass_static_field(ik, root_index);
      }
    }
  }
}

// See comments in HeapShared::check_enum_obj()
bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) {
  if (!ArchiveHeapLoader::is_fully_available()) {
    return false;
  }

  RunTimeClassInfo* info = RunTimeClassInfo::get_for(k);
  assert(info != NULL, "sanity");

  if (log_is_enabled(Info, cds, heap)) {
    ResourceMark rm;
    log_info(cds, heap)("Initializing Enum class: %s", k->external_name());
  }

  oop mirror = k->java_mirror();
  int i = 0;
  for (JavaFieldStream fs(k); !fs.done();
       fs.next()) {
    if (fs.access_flags().is_static()) {
      int root_index = info->enum_klass_static_field_root_index_at(i++);
      fieldDescriptor& fd = fs.field_descriptor();
      assert(fd.field_type() == T_OBJECT || fd.field_type() == T_ARRAY, "must be");
      mirror->obj_field_put(fd.offset(), get_root(root_index, /*clear=*/true));
    }
  }
  return true;
}

void HeapShared::run_full_gc_in_vm_thread() {
  if (HeapShared::can_write()) {
    // Avoid fragmentation while archiving heap objects.
    // We do this inside a safepoint, so that no further allocation can happen after GC
    // has finished.
    if (GCLocker::is_active()) {
      // Just checking for safety ...
      // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
      // has been modified such that JNI code is executed in some clean up threads after
      // we have finished class loading.
      log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
    } else {
      log_info(cds)("Run GC ...");
      Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
      log_info(cds)("Run GC done");
    }
  }
}

void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
                                 GrowableArray<MemRegion>* open_regions) {

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache(log_is_enabled(Info, cds, map));

    log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                  UseCompressedOops ? p2i(CompressedOops::begin()) :
                                      p2i((address)G1CollectedHeap::heap()->reserved().start()),
                  UseCompressedOops ? p2i(CompressedOops::end()) :
                                      p2i((address)G1CollectedHeap::heap()->reserved().end()));
    log_info(cds)("Dumping objects to closed archive heap region ...");
    copy_closed_objects(closed_regions);

    log_info(cds)("Dumping objects to open archive heap region ...");
    copy_open_objects(open_regions);

    CDSHeapVerifier::verify();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) {
  assert(HeapShared::can_write(), "must be");

  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive(_dumped_interned_strings);

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */,
                           false /* is_full_module_graph */);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_regions,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) {
  assert(HeapShared::can_write(), "must be");

  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors();

  archive_klass_objects();

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           false /* is_full_module_graph */);
  if (MetaspaceShared::use_full_module_graph()) {
    archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
                             false /* is_closed_archive */,
                             true /* is_full_module_graph */);
    Modules::verify_archived_modules();
  }

  copy_roots();

  G1CollectedHeap::heap()->end_archive_alloc_range(open_regions,
                                                   os::vm_allocation_granularity());
}

// Copy _pending_roots into an objArray
void HeapShared::copy_roots() {
  // HeapShared::roots() points into an ObjArray in the open archive region. A portion of the
  // objects in this array are discovered during HeapShared::archive_objects(). For example,
  // in HeapShared::archive_reachable_objects_from() -> HeapShared::check_enum_obj().
  // However, HeapShared::archive_objects() happens inside a safepoint, so we can't
  // allocate a "regular" ObjArray and pass the result to HeapShared::archive_object().
  // Instead, we have to roll our own alloc/copy routine here.
  int length = _pending_roots != NULL ? _pending_roots->length() : 0;
  size_t size = objArrayOopDesc::object_size(length);
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);

  memset(mem, 0, size * BytesPerWord);
  {
    // This is copied from MemAllocator::finish
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, k);
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    roots()->obj_at_put(i, _pending_roots->at(i));
  }
  log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem);
  count_allocation(roots()->size());
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the "buffered"
// address of the class.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
  assert(DumpSharedSpaces, "dump time only");
  bool created;
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
  KlassSubGraphInfo* info =
    _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(buffered_k, is_full_module_graph),
                                                  &created);
  assert(created, "must not initialize twice");
  return info;
}

KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
  assert(info != NULL, "must have been initialized");
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(
    int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new (mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
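//
// A rough sketch of the dump-time call sequence this function participates in,
// for one entry field such as Integer$IntegerCache::archivedCache (illustrative
// only; the driver, archive_object_subgraphs(), is defined elsewhere):
//
//   start_recording_subgraph(ik, ...);
//   archive_reachable_objects_from_static_field(ik, ..., field_offset, ...);
//     -> archive_reachable_objects_from(1, subgraph_info, f, ...)
//        -> add_subgraph_object_klass(orig_k)  // records the klass of each
//                                              // object reached by the walk
//   done_recording_subgraph(ik, ...);
//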
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new (mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::current()->is_in_buffer_space(buffered_k), "must be a shared class");

  if (_k == buffered_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (buffered_k->is_instance_klass()) {
    assert(InstanceKlass::cast(buffered_k)->is_shared_boot_class(),
           "must be boot class");
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
    check_allowed_klass(InstanceKlass::cast(orig_k));
  } else if (buffered_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(buffered_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
      check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
    }
    if (buffered_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(buffered_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(buffered_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(buffered_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}

void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
  if (ik->module()->name() == vmSymbols::java_base()) {
    assert(ik->package() != NULL, "classes in java.base cannot be in unnamed package");
    return;
  }

#ifndef PRODUCT
  if (!ik->module()->is_named() && ik->package() == NULL) {
    // This class is loaded by ArchiveHeapTestClass
    return;
  }
  const char* extra_msg = ", or in an unnamed package of an unnamed module";
#else
  const char* extra_msg = "";
#endif

  ResourceMark rm;
  log_error(cds, heap)("Class %s not allowed in archive heap. Must be in java.base%s",
                       ik->external_name(), extra_msg);
  os::_exit(1);
}

bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
  if (k->is_objArray_klass()) {
    k = ObjArrayKlass::cast(k)->bottom_klass();
  }
  if (k->is_instance_klass()) {
    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
      ResourceMark rm;
      log_info(cds, heap)("non-early: %s", k->external_name());
      return true;
    } else {
      return false;
    }
  } else {
    return false;
  }
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
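//
// Illustrative layout of the record built below (the authoritative declaration
// of ArchivedKlassSubGraphInfoRecord lives in the header); for a class with two
// archived entry fields, the RO-region record would hold roughly:
//
//   _k                       -> the buffered Klass that owns the subgraph
//   _entry_field_records     -> { offset0, root_index0, offset1, root_index1 }
//   _subgraph_object_klasses -> { K1, K2, ... } // must be resolved/initialized
//                                               // before the subgraph is usable
//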
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;
  _is_full_module_graph = info->is_full_module_graph();

  if (_is_full_module_graph) {
    // Consider all classes referenced by the full module graph as early -- we will be
    // allocating objects of these classes during JVMTI early phase, so they cannot
    // be processed by (non-early) JVMTI ClassFileLoadHook
    _has_non_early_klasses = false;
  } else {
    _has_non_early_klasses = info->has_non_early_klasses();
  }

  if (_has_non_early_klasses) {
    ResourceMark rm;
    log_info(cds, heap)(
          "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
          _k->external_name());
  }

  // populate the entry fields
  GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      ArchiveBuilder::new_ro_array<int>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
    }
  }

  ArchivePtrMarker::mark_pointer(&_k);
  ArchivePtrMarker::mark_pointer(&_entry_field_records);
  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
      u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before the archived
//   java object sub-graphs can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);
  writer.dump(&_run_time_subgraph_info_table, "subgraphs");

#ifndef PRODUCT
  if (ArchiveHeapTestClass != NULL) {
    size_t len = strlen(ArchiveHeapTestClass) + 1;
    Array<char>* array = ArchiveBuilder::new_ro_array<char>((int)len);
    strncpy(array->adr_at(0), ArchiveHeapTestClass, len);
    _archived_ArchiveHeapTestClass = array;
  }
#endif
  if (log_is_enabled(Info, cds, heap)) {
    print_stats();
  }
}

void HeapShared::serialize_root(SerializeClosure* soc) {
  oop roots_oop = NULL;

  if (soc->reading()) {
    soc->do_oop(&roots_oop); // read from archive
    assert(oopDesc::is_oop_or_null(roots_oop), "is oop");
    // Create an OopHandle only if we have actually mapped or loaded the roots
    if (roots_oop != NULL) {
      assert(ArchiveHeapLoader::is_fully_available(), "must be");
      _roots = OopHandle(Universe::vm_global(), roots_oop);
    }
  } else {
    // writing
    roots_oop = roots();
    soc->do_oop(&roots_oop); // write to archive
  }
}

void HeapShared::serialize_tables(SerializeClosure* soc) {

#ifndef PRODUCT
  soc->do_ptr((void**)&_archived_ArchiveHeapTestClass);
  if (soc->reading() && _archived_ArchiveHeapTestClass != NULL) {
    _test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
    setup_test_class(_test_class_name);
  }
#endif

  _run_time_subgraph_info_table.serialize_header(soc);
}

static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 forces a GC to happen in such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC,  true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
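//
// A minimal sketch of the expected runtime calling sequence (the call sites
// shown are hypothetical and for illustration only; the real callers are in
// the VM startup and class initialization paths):
//
//   // After the archived heap has been mapped or loaded:
//   HeapShared::resolve_classes(THREAD);
//
//   // Later, when e.g. java.lang.Integer$IntegerCache is initialized:
//   HeapShared::initialize_from_archived_subgraph(THREAD, cache_klass /* hypothetical */);
//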
void HeapShared::resolve_classes(JavaThread* current) {
  assert(UseSharedSpaces, "runtime only!");
  if (!ArchiveHeapLoader::is_fully_available()) {
    return; // nothing to do
  }
  resolve_classes_for_subgraphs(current, closed_archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, open_archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_open_archive_subgraph_entry_fields);
}

void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != NULL && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}

void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == NULL) {
    clear_archived_roots_of(k);
  }
}

void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  if (!ArchiveHeapLoader::is_fully_available()) {
    return; // nothing to do
  }

  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);

  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // None of the field values will be set if there was an exception when initializing the classes.
    // The java code will not see any of the archived objects in the
    // subgraphs referenced from k in this case.
    return;
  }

  if (record != NULL) {
    init_archived_fields_for(k, record);
  }
}

const ArchivedKlassSubGraphInfoRecord*
HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  if (!k->is_shared()) {
    return NULL;
  }
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

#ifndef PRODUCT
  if (_test_class_name != NULL && k->name()->equals(_test_class_name) && record != NULL) {
    _test_class = k;
    _test_class_record = record;
  }
#endif

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
                            k->external_name());
      }
      return NULL;
    }

    if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
                            k->external_name());
      }
      return NULL;
    }

    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
    }

    resolve_or_init(k, do_init, CHECK_NULL);

    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* klass = klasses->at(i);
        if (!klass->is_shared()) {
          return NULL;
        }
        resolve_or_init(klass, do_init, CHECK_NULL);
      }
    }
  }

  return record;
}

void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    if (k->class_loader_data() == NULL) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}

void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
  verify_the_heap(k, "before");

  // Load the subgraph entry fields from the record and store them back to
  // the corresponding fields within the mirror.
  oop m = k->java_mirror();
  Array<int>* entry_field_records = record->entry_field_records();
  if (entry_field_records != NULL) {
    int efr_len = entry_field_records->length();
    assert(efr_len % 2 == 0, "sanity");
    for (int i = 0; i < efr_len; i += 2) {
      int field_offset = entry_field_records->at(i);
      int root_index = entry_field_records->at(i+1);
      oop v = get_root(root_index, /*clear=*/true);
      m->obj_field_put(field_offset, v);
      log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
    }

    // Done. Java code can see the archived sub-graphs referenced from k's
    // mirror after this point.
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s",
                          k->external_name(), p2i(k), JvmtiExport::is_early_phase() ?
                          " (early)" : "");
    }
  }

  verify_the_heap(k, "after ");
}

void HeapShared::clear_archived_roots_of(Klass* k) {
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
  if (record != NULL) {
    Array<int>* entry_field_records = record->entry_field_records();
    if (entry_field_records != NULL) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 2 == 0, "sanity");
      for (int i = 0; i < efr_len; i += 2) {
        int root_index = entry_field_records->at(i+1);
        clear_root(root_index);
      }
    }
  }
}

class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  int _level;
  bool _is_closed_archive;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _orig_referencing_obj;
  oop _archived_referencing_obj;

  // The following are for maintaining a stack for determining
  // CachedOopInfo::_referrer
  static WalkOopAndArchiveClosure* _current;
  WalkOopAndArchiveClosure* _last;
 public:
  WalkOopAndArchiveClosure(int level,
                           bool is_closed_archive,
                           bool record_klasses_only,
                           KlassSubGraphInfo* subgraph_info,
                           oop orig, oop archived) :
    _level(level), _is_closed_archive(is_closed_archive),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _orig_referencing_obj(orig), _archived_referencing_obj(archived) {
    _last = _current;
    _current = this;
  }
  ~WalkOopAndArchiveClosure() {
    _current = _last;
  }
  void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }
  void do_oop(      oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      assert(!HeapShared::is_archived_object_during_dumptime(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));
      T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta);

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level,
                             _orig_referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        LogTarget(Trace, cds, heap) log;
        LogStream out(log);
        obj->print_on(&out);
      }

      oop archived = HeapShared::archive_reachable_objects_from(
          _level + 1, _subgraph_info, obj, _is_closed_archive);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
      assert(HeapShared::is_archived_object_during_dumptime(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }

 public:
  static WalkOopAndArchiveClosure* current() { return _current; }
  oop orig_referencing_obj()                 { return _orig_referencing_obj; }
  KlassSubGraphInfo* subgraph_info()         { return _subgraph_info; }
};

WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = NULL;

HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop orig_obj) {
  CachedOopInfo info;
  WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();

  info._subgraph_info = (walker == NULL) ? NULL : walker->subgraph_info();
  info._referrer = (walker == NULL) ? NULL : walker->orig_referencing_obj();
  info._obj = orig_obj;

  return info;
}

void HeapShared::check_closed_region_object(InstanceKlass* k) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && is_reference_type(ft)) {
        ResourceMark rm;
        log_warning(cds, heap)(
            "Please check reference field in %s instance in closed archive heap region: %s %s",
            k->external_name(), (fs.name())->as_C_string(),
            (fs.signature())->as_C_string());
      }
    }
  }
}

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object_during_dumptime(orig_obj), "sanity");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    os::_exit(1);
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    os::_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_object(orig_obj);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
          "Cannot archive the sub-graph referenced from %s object ("
          PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
          orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        os::_exit(1);
      }
    }

    if (java_lang_Module::is_instance(orig_obj)) {
      if (Modules::check_module_oop(orig_obj)) {
        Modules::update_oops_in_archived_module(orig_obj, append_root(archived_obj));
      }
      java_lang_Module::set_module_entry(archived_obj, NULL);
    } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
      // class_data will be restored explicitly at run time.
      guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
                orig_obj == SystemDictionary::java_system_loader() ||
                java_lang_ClassLoader::loader_data(orig_obj) == NULL, "must be");
      java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_region_object(InstanceKlass::cast(orig_k));
  }

  check_enum_obj(level + 1, subgraph_info, orig_obj, is_closed_archive);
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
//
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. Mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
//    within a Class instance (java mirror). If the static field is a
//    reference field and points to a non-null java object, proceed to
//    the next step.
//
// 2) Archives the referenced java object.
//    If an archived copy of the
//    current object already exists, updates the pointer in the archived
//    copy of the referencing object to point to the current archived object.
//    Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
//    archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
//    point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
//    for loading and initializing before any object in the archived graph can
//    be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
  }
}

#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph to be not archived.
    // See comments in archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(is_archived_object_during_dumptime(obj), "must be");
      assert(find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!is_archived_object_during_dumptime(obj), "must be");
      assert(find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_subgraph_info(k, is_full_module_graph);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
                                 _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields can never be overloaded");
      if (is_reference_type(fd->field_type())) {
        _found = true;
        _offset = fd->offset();
      }
    }
  }
  bool found()  { return _found; }
  int  offset() { return _offset; }
};
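
// A minimal usage sketch for the closure above (this is what
// init_subgraph_entry_fields() below does for each table entry; "ik" is
// assumed to be an already-resolved InstanceKlass):
//
//   TempNewSymbol field_name = SymbolTable::new_symbol("archivedCache");
//   ArchivableStaticFieldFinder finder(ik, field_name);
//   ik->do_local_static_fields(&finder);
//   if (finder.found()) {
//     int offset = finder.offset(); // offset of the static field within ik's java mirror
//   }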

void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            TRAPS) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);
    ResourceMark rm; // for stringStream::as_string() etc.

#ifndef PRODUCT
    bool is_test_class = (ArchiveHeapTestClass != NULL) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
#else
    bool is_test_class = false;
#endif

    if (is_test_class) {
      log_warning(cds)("Loading ArchiveHeapTestClass %s ...", ArchiveHeapTestClass);
    }

    Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
      stringStream st;
      st.print("Failed to initialize archive heap: %s cannot be loaded by the boot loader", info->klass_name);
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
    }

    if (!k->is_instance_klass()) {
      stringStream st;
      st.print("Failed to initialize archive heap: %s is not an instance class", info->klass_name);
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
    }

    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(), "only boot classes are supported");

    if (is_test_class) {
      if (ik->module()->is_named()) {
        // We don't want ArchiveHeapTestClass to be abused to easily load/initialize arbitrary
        // core-lib classes. You need to at least append to the bootclasspath.
        stringStream st;
        st.print("ArchiveHeapTestClass %s is not in unnamed module", ArchiveHeapTestClass);
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
      }

      if (ik->package() != NULL) {
        // This restriction makes HeapShared::is_a_test_class_in_unnamed_module() easy.
        stringStream st;
        st.print("ArchiveHeapTestClass %s is not in unnamed package", ArchiveHeapTestClass);
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
      }
    } else {
      if (ik->module()->name() != vmSymbols::java_base()) {
        // We don't want to deal with cases when a module is unavailable at runtime.
        // FUTURE -- load from archived heap only when module graph has not changed
        // between dump and runtime.
        stringStream st;
        st.print("%s is not in the java.base module", info->klass_name);
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
      }
    }

    if (is_test_class) {
      log_warning(cds)("Initializing ArchiveHeapTestClass %s ...", ArchiveHeapTestClass);
    }
    ik->initialize(CHECK);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    if (!finder.found()) {
      stringStream st;
      st.print("Unable to find the static T_OBJECT field %s::%s", info->klass_name, info->field_name);
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
    }

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(TRAPS) {
  assert(HeapShared::can_write(), "must be");
  _dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields, CHECK);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields, CHECK);
  if (MetaspaceShared::use_full_module_graph()) {
    init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields, CHECK);
  }
}

#ifndef PRODUCT
void HeapShared::setup_test_class(const char* test_class_name) {
  ArchivableStaticFieldInfo* p = open_archive_subgraph_entry_fields;
  int num_slots = sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
  assert(p[num_slots - 2].klass_name == NULL, "must have empty slot that's patched below");
  assert(p[num_slots - 1].klass_name == NULL, "must have empty slot that marks the end of the list");

  if (test_class_name != NULL) {
    p[num_slots - 2].klass_name = test_class_name;
    p[num_slots - 2].field_name = ARCHIVE_TEST_FIELD_NAME;
  }
}
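
// For example (a sketch; MyHeapTest is a hypothetical class that must live in
// the unnamed package of the unnamed module, see init_subgraph_entry_fields()):
//
//   java -XX:ArchiveHeapTestClass=MyHeapTest -Xshare:dump ...
//
// patches the second-to-last slot so that MyHeapTest::archivedObjects
// (ARCHIVE_TEST_FIELD_NAME) becomes an additional subgraph entry field.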

// See if ik is one of the test classes that are pulled in by
// -XX:ArchiveHeapTestClass at runtime. This may be called before the module
// system is initialized, so we cannot rely on InstanceKlass::module(), etc.
bool HeapShared::is_a_test_class_in_unnamed_module(Klass* ik) {
  if (_test_class != NULL) {
    if (ik == _test_class) {
      return true;
    }
    Array<Klass*>* klasses = _test_class_record->subgraph_object_klasses();
    if (klasses == NULL) {
      return false;
    }

    for (int i = 0; i < klasses->length(); i++) {
      Klass* k = klasses->at(i);
      if (k == ik) {
        Symbol* name;
        if (k->is_instance_klass()) {
          name = InstanceKlass::cast(k)->name();
        } else if (k->is_objArray_klass()) {
          Klass* bk = ObjArrayKlass::cast(k)->bottom_klass();
          if (!bk->is_instance_klass()) {
            return false;
          }
          name = bk->name();
        } else {
          return false;
        }

        // See KlassSubGraphInfo::check_allowed_klass() - only two types of
        // classes are allowed:
        //   (A) java.base classes (which must not be in the unnamed module)
        //   (B) test classes, which must be in the unnamed package of the unnamed module.
        // So if we see a '/' character in the class name, it must be in (A);
        // otherwise it must be in (B).
        if (name->index_of_at(0, "/", 1) >= 0) {
          return false; // (A)
        }

        return true; // (B)
      }
    }
  }

  return false;
}
#endif

void HeapShared::init_for_dumping(TRAPS) {
  if (HeapShared::can_write()) {
    setup_test_class(ArchiveHeapTestClass);
    _dumped_interned_strings = new (mtClass)DumpedInternedStrings();
    _native_pointers = new GrowableArrayCHeap<Metadata**, mtClassShared>(2048);
    init_subgraph_entry_fields(CHECK);
  }
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_closed_archive,
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field.
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name,
                                                  is_closed_archive);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
                      is_closed_archive ? "closed" : "open",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

// Not all the strings in the global StringTable are dumped into the archive,
// because some of those strings may be referenced only by classes that are
// excluded from the archive. We need to explicitly mark the strings that are:
//   [1] used by classes that WILL be archived;
//   [2] included in the SharedArchiveConfigFile.
void HeapShared::add_to_dumped_interned_strings(oop string) {
  assert_at_safepoint(); // DumpedInternedStrings uses raw oops
  bool created;
  _dumped_interned_strings->put_if_absent(string, true, &created);
}

// At dump time, find the location of all the non-null oop pointers in an
// archived heap region. This way we can quickly relocate all the pointers at
// runtime without having to walk the objects with a BasicOopIterateClosure.
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  void* _start;
  BitMap *_oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(void* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual void do_oop(narrowOop* p) {
    assert(UseCompressedOops, "sanity");
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      // Note: HeapShared::to_requested_address() is not necessary because
      // the heap always starts at a deterministic address with UseCompressedOops==true.
      size_t idx = p - (narrowOop*)_start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop* p) {
    assert(!UseCompressedOops, "sanity");
    _num_total_oops ++;
    if ((*p) != NULL) {
      size_t idx = p - (oop*)_start;
      _oopmap->set_bit(idx);
      if (DumpSharedSpaces) {
        // Make the heap content deterministic.
        *p = HeapShared::to_requested_address(*p);
      }
    } else {
      _num_null_oops ++;
    }
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};

address HeapShared::to_requested_address(address dumptime_addr) {
  assert(DumpSharedSpaces, "static dump time only");
  if (dumptime_addr == NULL || UseCompressedOops) {
    return dumptime_addr;
  }

  // With UseCompressedOops==false, actual_base is selected by the OS so
  // it's different across -Xshare:dump runs.
  address actual_base = (address)G1CollectedHeap::heap()->reserved().start();
  address actual_end  = (address)G1CollectedHeap::heap()->reserved().end();
  assert(actual_base <= dumptime_addr && dumptime_addr <= actual_end, "must be an address in the heap");

  // We always write the objects as if the heap started at this address. This
  // makes the heap content deterministic.
  //
  // Note that at runtime, the heap address is also selected by the OS, so
  // the archive heap will not be mapped at 0x10000000. Instead, we will call
  // HeapShared::patch_embedded_pointers() to relocate the heap contents
  // accordingly.
  const address REQUESTED_BASE = (address)0x10000000;
  intx delta = REQUESTED_BASE - actual_base;

  address requested_addr = dumptime_addr + delta;
  assert(REQUESTED_BASE != 0 && requested_addr != NULL, "sanity");
  return requested_addr;
}
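
// A worked example with illustrative numbers: if the OS reserved the
// dump-time heap at actual_base == 0x00007f5e3c000000 and we are given
// dumptime_addr == 0x00007f5e3c000430, then
//
//   delta          == 0x10000000 - 0x00007f5e3c000000
//   requested_addr == dumptime_addr + delta == 0x10000430
//
// i.e. the object is written out as if it lived 0x430 bytes above
// REQUESTED_BASE, regardless of where the dump-time heap actually was.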

ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  size_t num_bits = region.byte_size() / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  ResourceBitMap oopmap(num_bits);

  HeapWord* p = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((void*)p, &oopmap);
  ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL;

  int num_objs = 0;
  while (p < end) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&finder);
    p += o->size();
    if (DumpSharedSpaces) {
      builder->relocate_klass_ptr_of_oop(o);
    }
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, oop fields = %7d (nulls = %7d)",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

ResourceBitMap HeapShared::calculate_ptrmap(MemRegion region) {
  size_t num_bits = region.byte_size() / sizeof(Metadata*);
  ResourceBitMap oopmap(num_bits);

  Metadata** start = (Metadata**)region.start();
  Metadata** end   = (Metadata**)region.end();

  int num_non_null_ptrs = 0;
  int len = _native_pointers->length();
  for (int i = 0; i < len; i++) {
    Metadata** p = _native_pointers->at(i);
    if (start <= p && p < end) {
      assert(*p != NULL, "must be non-null");
      num_non_null_ptrs ++;
      size_t idx = p - start;
      oopmap.set_bit(idx);
    }
  }

  log_info(cds, heap)("calculate_ptrmap: marked %d non-null native pointers out of "
                      SIZE_FORMAT " possible locations", num_non_null_ptrs, num_bits);
  if (num_non_null_ptrs > 0) {
    return oopmap;
  } else {
    return ResourceBitMap(0);
  }
}

void HeapShared::count_allocation(size_t size) {
  _total_obj_count ++;
  _total_obj_size += size;
  for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
    if (size <= (size_t(1) << i)) {
      _alloc_count[i] ++;
      _alloc_size[i] += size;
      return;
    }
  }
}

static double avg_size(size_t size, size_t count) {
  double avg = 0;
  if (count > 0) {
    avg = double(size * HeapWordSize) / double(count);
  }
  return avg;
}

void HeapShared::print_stats() {
  size_t huge_count = _total_obj_count;
  size_t huge_size  = _total_obj_size;

  for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
    size_t byte_size_limit = (size_t(1) << i) * HeapWordSize;
    size_t count = _alloc_count[i];
    size_t size  = _alloc_size[i];
    log_info(cds, heap)(SIZE_FORMAT_W(8) " objects are <= " SIZE_FORMAT_W(-6)
                        " bytes (total " SIZE_FORMAT_W(8) " bytes, avg %8.1f bytes)",
                        count, byte_size_limit, size * HeapWordSize, avg_size(size, count));
    huge_count -= count;
    huge_size  -= size;
  }

  log_info(cds, heap)(SIZE_FORMAT_W(8) " huge objects (total " SIZE_FORMAT_W(8) " bytes"
                      ", avg %8.1f bytes)",
                      huge_count, huge_size * HeapWordSize,
                      avg_size(huge_size, huge_count));
  log_info(cds, heap)(SIZE_FORMAT_W(8) " total objects (total " SIZE_FORMAT_W(8) " bytes"
                      ", avg %8.1f bytes)",
                      _total_obj_count, _total_obj_size * HeapWordSize,
                      avg_size(_total_obj_size, _total_obj_count));
}

#endif // INCLUDE_CDS_JAVA_HEAP