1 /* 2 * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "cds/archiveBuilder.hpp" 27 #include "cds/archiveUtils.hpp" 28 #include "cds/cdsHeapVerifier.hpp" 29 #include "cds/filemap.hpp" 30 #include "cds/heapShared.inline.hpp" 31 #include "cds/metaspaceShared.hpp" 32 #include "classfile/classLoaderData.hpp" 33 #include "classfile/classLoaderDataShared.hpp" 34 #include "classfile/javaClasses.inline.hpp" 35 #include "classfile/moduleEntry.hpp" 36 #include "classfile/stringTable.hpp" 37 #include "classfile/symbolTable.hpp" 38 #include "classfile/systemDictionary.hpp" 39 #include "classfile/systemDictionaryShared.hpp" 40 #include "classfile/vmClasses.hpp" 41 #include "classfile/vmSymbols.hpp" 42 #include "gc/shared/collectedHeap.hpp" 43 #include "gc/shared/gcLocker.hpp" 44 #include "gc/shared/gcVMOperations.hpp" 45 #include "logging/log.hpp" 46 #include "logging/logStream.hpp" 47 #include "memory/iterator.inline.hpp" 48 #include "memory/metadataFactory.hpp" 49 #include "memory/metaspaceClosure.hpp" 50 #include "memory/resourceArea.hpp" 51 #include "memory/universe.hpp" 52 #include "oops/compressedOops.inline.hpp" 53 #include "oops/fieldStreams.inline.hpp" 54 #include "oops/objArrayOop.inline.hpp" 55 #include "oops/oop.inline.hpp" 56 #include "oops/typeArrayOop.inline.hpp" 57 #include "prims/jvmtiExport.hpp" 58 #include "runtime/fieldDescriptor.inline.hpp" 59 #include "runtime/globals_extension.hpp" 60 #include "runtime/init.hpp" 61 #include "runtime/java.hpp" 62 #include "runtime/javaCalls.hpp" 63 #include "runtime/safepointVerifiers.hpp" 64 #include "utilities/bitMap.inline.hpp" 65 #include "utilities/copy.hpp" 66 #if INCLUDE_G1GC 67 #include "gc/g1/g1CollectedHeap.hpp" 68 #endif 69 70 #if INCLUDE_CDS_JAVA_HEAP 71 72 bool HeapShared::_closed_regions_mapped = false; 73 bool HeapShared::_open_regions_mapped = false; 74 bool HeapShared::_is_loaded = false; 75 bool HeapShared::_disable_writing = false; 76 address HeapShared::_narrow_oop_base; 77 int HeapShared::_narrow_oop_shift; 78 DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL; 79 80 // Support for loaded heap. 
81 uintptr_t HeapShared::_loaded_heap_bottom = 0; 82 uintptr_t HeapShared::_loaded_heap_top = 0; 83 uintptr_t HeapShared::_dumptime_base_0 = UINTPTR_MAX; 84 uintptr_t HeapShared::_dumptime_base_1 = UINTPTR_MAX; 85 uintptr_t HeapShared::_dumptime_base_2 = UINTPTR_MAX; 86 uintptr_t HeapShared::_dumptime_base_3 = UINTPTR_MAX; 87 uintptr_t HeapShared::_dumptime_top = 0; 88 intx HeapShared::_runtime_offset_0 = 0; 89 intx HeapShared::_runtime_offset_1 = 0; 90 intx HeapShared::_runtime_offset_2 = 0; 91 intx HeapShared::_runtime_offset_3 = 0; 92 bool HeapShared::_loading_failed = false; 93 94 // Support for mapped heap (!UseCompressedOops only) 95 ptrdiff_t HeapShared::_runtime_delta = 0; 96 97 // 98 // If you add new entries to the following tables, you should know what you're doing! 99 // 100 101 // Entry fields for shareable subgraphs archived in the closed archive heap 102 // region. Warning: Objects in the subgraphs should not have reference fields 103 // assigned at runtime. 104 static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = { 105 {"java/lang/Integer$IntegerCache", "archivedCache"}, 106 {"java/lang/Long$LongCache", "archivedCache"}, 107 {"java/lang/Byte$ByteCache", "archivedCache"}, 108 {"java/lang/Short$ShortCache", "archivedCache"}, 109 {"java/lang/Character$CharacterCache", "archivedCache"}, 110 {"java/util/jar/Attributes$Name", "KNOWN_NAMES"}, 111 {"sun/util/locale/BaseLocale", "constantBaseLocales"}, 112 }; 113 // Entry fields for subgraphs archived in the open archive heap region. 114 static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = { 115 {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"}, 116 {"java/util/ImmutableCollections", "archivedObjects"}, 117 {"java/lang/ModuleLayer", "EMPTY_LAYER"}, 118 {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"}, 119 {"jdk/internal/math/FDBigInteger", "archivedCaches"}, 120 }; 121 122 // Entry fields for subgraphs archived in the open archive heap region (full module graph). 
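// These entries are used only when the full module graph is archived; see the
// MetaspaceShared::use_full_module_graph() checks in copy_open_objects() and
// init_subgraph_entry_fields() below.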
123 static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = { 124 {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"}, 125 {"jdk/internal/module/ArchivedBootLayer", "archivedBootLayer"}, 126 {"java/lang/Module$ArchivedData", "archivedData"}, 127 }; 128 129 const static int num_closed_archive_subgraph_entry_fields = 130 sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo); 131 const static int num_open_archive_subgraph_entry_fields = 132 sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo); 133 const static int num_fmg_open_archive_subgraph_entry_fields = 134 sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo); 135 136 GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL; 137 OopHandle HeapShared::_roots; 138 139 #ifdef ASSERT 140 bool HeapShared::is_archived_object_during_dumptime(oop p) { 141 assert(HeapShared::can_write(), "must be"); 142 assert(DumpSharedSpaces, "this function is only used with -Xshare:dump"); 143 return Universe::heap()->is_archived_object(p); 144 } 145 #endif 146 147 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], int num, InstanceKlass* ik) { 148 for (int i = 0; i < num; i++) { 149 if (fields[i].klass == ik) { 150 return true; 151 } 152 } 153 return false; 154 } 155 156 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) { 157 return is_subgraph_root_class_of(closed_archive_subgraph_entry_fields, 158 num_closed_archive_subgraph_entry_fields, ik) || 159 is_subgraph_root_class_of(open_archive_subgraph_entry_fields, 160 num_open_archive_subgraph_entry_fields, ik) || 161 is_subgraph_root_class_of(fmg_open_archive_subgraph_entry_fields, 162 num_fmg_open_archive_subgraph_entry_fields, ik); 163 } 164 165 void HeapShared::fixup_regions() { 166 FileMapInfo* mapinfo = FileMapInfo::current_info(); 167 if (is_mapped()) { 168 mapinfo->fixup_mapped_heap_regions(); 169 } else if (_loading_failed) { 170 fill_failed_loaded_region(); 171 } 172 if (is_fully_available()) { 173 if (!MetaspaceShared::use_full_module_graph()) { 174 // Need to remove all the archived java.lang.Module objects from HeapShared::roots(). 
175 ClassLoaderDataShared::clear_archived_oops(); 176 } 177 } 178 SystemDictionaryShared::update_archived_mirror_native_pointers(); 179 } 180 181 unsigned HeapShared::oop_hash(oop const& p) { 182 unsigned hash = (unsigned)p->identity_hash(); 183 return hash; 184 } 185 186 static void reset_states(oop obj, TRAPS) { 187 Handle h_obj(THREAD, obj); 188 InstanceKlass* klass = InstanceKlass::cast(obj->klass()); 189 TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates"); 190 Symbol* method_sig = vmSymbols::void_method_signature(); 191 192 while (klass != NULL) { 193 Method* method = klass->find_method(method_name, method_sig); 194 if (method != NULL) { 195 assert(method->is_private(), "must be"); 196 if (log_is_enabled(Debug, cds)) { 197 ResourceMark rm(THREAD); 198 log_debug(cds)(" calling %s", method->name_and_sig_as_C_string()); 199 } 200 JavaValue result(T_VOID); 201 JavaCalls::call_special(&result, h_obj, klass, 202 method_name, method_sig, CHECK); 203 } 204 klass = klass->java_super(); 205 } 206 } 207 208 void HeapShared::reset_archived_object_states(TRAPS) { 209 assert(DumpSharedSpaces, "dump-time only"); 210 log_debug(cds)("Resetting platform loader"); 211 reset_states(SystemDictionary::java_platform_loader(), CHECK); 212 log_debug(cds)("Resetting system loader"); 213 reset_states(SystemDictionary::java_system_loader(), CHECK); 214 215 // Clean up jdk.internal.loader.ClassLoaders::bootLoader(), which is not 216 // directly used for class loading, but rather is used by the core library 217 // to keep track of resources, etc, loaded by the null class loader. 218 // 219 // Note, this object is non-null, and is not the same as 220 // ClassLoaderData::the_null_class_loader_data()->class_loader(), 221 // which is null. 222 log_debug(cds)("Resetting boot loader"); 223 JavaValue result(T_OBJECT); 224 JavaCalls::call_static(&result, 225 vmClasses::jdk_internal_loader_ClassLoaders_klass(), 226 vmSymbols::bootLoader_name(), 227 vmSymbols::void_BuiltinClassLoader_signature(), 228 CHECK); 229 Handle boot_loader(THREAD, result.get_oop()); 230 reset_states(boot_loader(), CHECK); 231 } 232 233 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL; 234 HeapShared::OriginalObjectTable* HeapShared::_original_object_table = NULL; 235 oop HeapShared::find_archived_heap_object(oop obj) { 236 assert(DumpSharedSpaces, "dump-time only"); 237 ArchivedObjectCache* cache = archived_object_cache(); 238 CachedOopInfo* p = cache->get(obj); 239 if (p != NULL) { 240 return p->_obj; 241 } else { 242 return NULL; 243 } 244 } 245 246 int HeapShared::append_root(oop obj) { 247 assert(DumpSharedSpaces, "dump-time only"); 248 249 // No GC should happen since we aren't scanning _pending_roots. 
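  // The returned index is recorded in the subgraph entry-field records; at runtime the
  // same index is passed to get_root() to retrieve the archived object.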
250 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); 251 252 if (_pending_roots == NULL) { 253 _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500); 254 } 255 256 return _pending_roots->append(obj); 257 } 258 259 objArrayOop HeapShared::roots() { 260 if (DumpSharedSpaces) { 261 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); 262 if (!HeapShared::can_write()) { 263 return NULL; 264 } 265 } else { 266 assert(UseSharedSpaces, "must be"); 267 } 268 269 objArrayOop roots = (objArrayOop)_roots.resolve(); 270 assert(roots != NULL, "should have been initialized"); 271 return roots; 272 } 273 274 // Returns an objArray that contains all the roots of the archived objects 275 oop HeapShared::get_root(int index, bool clear) { 276 assert(index >= 0, "sanity"); 277 if (DumpSharedSpaces) { 278 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); 279 assert(_pending_roots != NULL, "sanity"); 280 return _pending_roots->at(index); 281 } else { 282 assert(UseSharedSpaces, "must be"); 283 assert(!_roots.is_empty(), "must have loaded shared heap"); 284 oop result = roots()->obj_at(index); 285 if (clear) { 286 clear_root(index); 287 } 288 return result; 289 } 290 } 291 292 void HeapShared::clear_root(int index) { 293 assert(index >= 0, "sanity"); 294 assert(UseSharedSpaces, "must be"); 295 if (is_fully_available()) { 296 if (log_is_enabled(Debug, cds, heap)) { 297 oop old = roots()->obj_at(index); 298 log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old)); 299 } 300 roots()->obj_at_put(index, NULL); 301 } 302 } 303 304 oop HeapShared::archive_object(oop obj) { 305 assert(DumpSharedSpaces, "dump-time only"); 306 307 oop ao = find_archived_heap_object(obj); 308 if (ao != NULL) { 309 // already archived 310 return ao; 311 } 312 313 int len = obj->size(); 314 if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) { 315 log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT, 316 p2i(obj), (size_t)obj->size()); 317 return NULL; 318 } 319 320 oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len)); 321 if (archived_oop != NULL) { 322 Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len); 323 // Reinitialize markword to remove age/marking/locking/etc. 324 // 325 // We need to retain the identity_hash, because it may have been used by some hashtables 326 // in the shared heap. This also has the side effect of pre-initializing the 327 // identity_hash for all shared objects, so they are less likely to be written 328 // into during run time, increasing the potential of memory sharing. 
    int hash_original = obj->identity_hash();
    archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
    assert(archived_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
    assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);

    ArchivedObjectCache* cache = archived_object_cache();
    CachedOopInfo info = make_cached_oop_info(archived_oop);
    cache->put(obj, info);
    if (_original_object_table != NULL) {
      _original_object_table->put(archived_oop, obj);
    }
    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
    }
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_direct_exit(-1,
      err_msg("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
              SIZE_FORMAT "M", MaxHeapSize/M));
  }
  return archived_oop;
}

void HeapShared::archive_klass_objects() {
  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));

    // archive mirror object
    java_lang_Class::archive_mirror(k);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references();
    }
  }
}

// -- Handling of Enum objects
// Java Enum classes have synthetic <clinit> methods that look like this:
//     enum MyEnum {FOO, BAR}
//     MyEnum::<clinit> {
//         /*static final MyEnum*/ MyEnum::FOO = new MyEnum("FOO");
//         /*static final MyEnum*/ MyEnum::BAR = new MyEnum("BAR");
//     }
//
// If the MyEnum::FOO object is referenced by any of the archived subgraphs, we must
// ensure that the archived value is identical (in object address) to the runtime value
// of MyEnum::FOO.
//
// However, since MyEnum::<clinit> is synthetically generated by javac, there's
// no way of programmatically handling this inside the Java code (as you would handle
// ModuleLayer::EMPTY_LAYER, for example).
//
// Instead, we archive all static fields of such Enum classes. At runtime,
// HeapShared::initialize_enum_klass() will skip the <clinit> method and pull
// the static fields out of the archived heap.
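//
// A rough sketch of that runtime restore (see initialize_enum_klass() below):
//
//     for each static field fd of MyEnum:
//         mirror->obj_field_put(fd.offset(), get_root(saved_root_index, /*clear=*/true));
//
// where saved_root_index was recorded at dump time by check_enum_obj() via
// SystemDictionaryShared::add_enum_klass_static_field().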
394 void HeapShared::check_enum_obj(int level, 395 KlassSubGraphInfo* subgraph_info, 396 oop orig_obj, 397 bool is_closed_archive) { 398 Klass* k = orig_obj->klass(); 399 Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k); 400 if (!k->is_instance_klass()) { 401 return; 402 } 403 InstanceKlass* ik = InstanceKlass::cast(k); 404 if (ik->java_super() == vmClasses::Enum_klass() && !ik->has_archived_enum_objs()) { 405 ResourceMark rm; 406 ik->set_has_archived_enum_objs(); 407 relocated_k->set_has_archived_enum_objs(); 408 oop mirror = ik->java_mirror(); 409 410 for (JavaFieldStream fs(ik); !fs.done(); fs.next()) { 411 if (fs.access_flags().is_static()) { 412 fieldDescriptor& fd = fs.field_descriptor(); 413 if (fd.field_type() != T_OBJECT && fd.field_type() != T_ARRAY) { 414 guarantee(false, "static field %s::%s must be T_OBJECT or T_ARRAY", 415 ik->external_name(), fd.name()->as_C_string()); 416 } 417 oop oop_field = mirror->obj_field(fd.offset()); 418 if (oop_field == NULL) { 419 guarantee(false, "static field %s::%s must not be null", 420 ik->external_name(), fd.name()->as_C_string()); 421 } else if (oop_field->klass() != ik && oop_field->klass() != ik->array_klass_or_null()) { 422 guarantee(false, "static field %s::%s is of the wrong type", 423 ik->external_name(), fd.name()->as_C_string()); 424 } 425 oop archived_oop_field = archive_reachable_objects_from(level, subgraph_info, oop_field, is_closed_archive); 426 int root_index = append_root(archived_oop_field); 427 log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT " -> " INTPTR_FORMAT ")", 428 root_index, ik->external_name(), fd.name()->as_C_string(), 429 p2i((oopDesc*)oop_field), p2i((oopDesc*)archived_oop_field)); 430 SystemDictionaryShared::add_enum_klass_static_field(ik, root_index); 431 } 432 } 433 } 434 } 435 436 // See comments in HeapShared::check_enum_obj() 437 bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) { 438 if (!is_fully_available()) { 439 return false; 440 } 441 442 RunTimeClassInfo* info = RunTimeClassInfo::get_for(k); 443 assert(info != NULL, "sanity"); 444 445 if (log_is_enabled(Info, cds, heap)) { 446 ResourceMark rm; 447 log_info(cds, heap)("Initializing Enum class: %s", k->external_name()); 448 } 449 450 oop mirror = k->java_mirror(); 451 int i = 0; 452 for (JavaFieldStream fs(k); !fs.done(); fs.next()) { 453 if (fs.access_flags().is_static()) { 454 int root_index = info->enum_klass_static_field_root_index_at(i++); 455 fieldDescriptor& fd = fs.field_descriptor(); 456 assert(fd.field_type() == T_OBJECT || fd.field_type() == T_ARRAY, "must be"); 457 mirror->obj_field_put(fd.offset(), get_root(root_index, /*clear=*/true)); 458 } 459 } 460 return true; 461 } 462 463 void HeapShared::run_full_gc_in_vm_thread() { 464 if (HeapShared::can_write()) { 465 // Avoid fragmentation while archiving heap objects. 466 // We do this inside a safepoint, so that no further allocation can happen after GC 467 // has finished. 468 if (GCLocker::is_active()) { 469 // Just checking for safety ... 470 // This should not happen during -Xshare:dump. If you see this, probably the Java core lib 471 // has been modified such that JNI code is executed in some clean up threads after 472 // we have finished class loading. 473 log_warning(cds)("GC locker is held, unable to start extra compacting GC. 
This may produce suboptimal results."); 474 } else { 475 log_info(cds)("Run GC ..."); 476 Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc); 477 log_info(cds)("Run GC done"); 478 } 479 } 480 } 481 482 void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions, 483 GrowableArray<MemRegion>* open_regions) { 484 485 G1HeapVerifier::verify_ready_for_archiving(); 486 487 { 488 NoSafepointVerifier nsv; 489 490 // Cache for recording where the archived objects are copied to 491 create_archived_object_cache(log_is_enabled(Info, cds, map)); 492 493 log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]", 494 UseCompressedOops ? p2i(CompressedOops::begin()) : 495 p2i((address)G1CollectedHeap::heap()->reserved().start()), 496 UseCompressedOops ? p2i(CompressedOops::end()) : 497 p2i((address)G1CollectedHeap::heap()->reserved().end())); 498 log_info(cds)("Dumping objects to closed archive heap region ..."); 499 copy_closed_objects(closed_regions); 500 501 log_info(cds)("Dumping objects to open archive heap region ..."); 502 copy_open_objects(open_regions); 503 504 CDSHeapVerifier::verify(); 505 } 506 507 G1HeapVerifier::verify_archive_regions(); 508 } 509 510 void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) { 511 assert(HeapShared::can_write(), "must be"); 512 513 G1CollectedHeap::heap()->begin_archive_alloc_range(); 514 515 // Archive interned string objects 516 StringTable::write_to_archive(_dumped_interned_strings); 517 518 archive_object_subgraphs(closed_archive_subgraph_entry_fields, 519 num_closed_archive_subgraph_entry_fields, 520 true /* is_closed_archive */, 521 false /* is_full_module_graph */); 522 523 G1CollectedHeap::heap()->end_archive_alloc_range(closed_regions, 524 os::vm_allocation_granularity()); 525 } 526 527 void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) { 528 assert(HeapShared::can_write(), "must be"); 529 530 G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */); 531 532 java_lang_Class::archive_basic_type_mirrors(); 533 534 archive_klass_objects(); 535 536 archive_object_subgraphs(open_archive_subgraph_entry_fields, 537 num_open_archive_subgraph_entry_fields, 538 false /* is_closed_archive */, 539 false /* is_full_module_graph */); 540 if (MetaspaceShared::use_full_module_graph()) { 541 archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields, 542 num_fmg_open_archive_subgraph_entry_fields, 543 false /* is_closed_archive */, 544 true /* is_full_module_graph */); 545 ClassLoaderDataShared::init_archived_oops(); 546 } 547 548 copy_roots(); 549 550 G1CollectedHeap::heap()->end_archive_alloc_range(open_regions, 551 os::vm_allocation_granularity()); 552 } 553 554 // Copy _pending_archive_roots into an objArray 555 void HeapShared::copy_roots() { 556 // HeapShared::roots() points into an ObjArray in the open archive region. A portion of the 557 // objects in this array are discovered during HeapShared::archive_objects(). For example, 558 // in HeapShared::archive_reachable_objects_from() -> HeapShared::check_enum_obj(). 559 // However, HeapShared::archive_objects() happens inside a safepoint, so we can't 560 // allocate a "regular" ObjArray and pass the result to HeapShared::archive_object(). 561 // Instead, we have to roll our own alloc/copy routine here. 562 int length = _pending_roots != NULL ? 
_pending_roots->length() : 0; 563 size_t size = objArrayOopDesc::object_size(length); 564 Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass 565 HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size); 566 567 memset(mem, 0, size * BytesPerWord); 568 { 569 // This is copied from MemAllocator::finish 570 oopDesc::set_mark(mem, markWord::prototype()); 571 oopDesc::release_set_klass(mem, k); 572 } 573 { 574 // This is copied from ObjArrayAllocator::initialize 575 arrayOopDesc::set_length(mem, length); 576 } 577 578 _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem)); 579 for (int i = 0; i < length; i++) { 580 roots()->obj_at_put(i, _pending_roots->at(i)); 581 } 582 log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem); 583 } 584 585 void HeapShared::init_narrow_oop_decoding(address base, int shift) { 586 _narrow_oop_base = base; 587 _narrow_oop_shift = shift; 588 } 589 590 // 591 // Subgraph archiving support 592 // 593 HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL; 594 HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table; 595 596 // Get the subgraph_info for Klass k. A new subgraph_info is created if 597 // there is no existing one for k. The subgraph_info records the relocated 598 // Klass* of the original k. 599 KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) { 600 assert(DumpSharedSpaces, "dump time only"); 601 bool created; 602 Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k); 603 KlassSubGraphInfo* info = 604 _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(relocated_k, is_full_module_graph), 605 &created); 606 assert(created, "must not initialize twice"); 607 return info; 608 } 609 610 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) { 611 assert(DumpSharedSpaces, "dump time only"); 612 KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k); 613 assert(info != NULL, "must have been initialized"); 614 return info; 615 } 616 617 // Add an entry field to the current KlassSubGraphInfo. 618 void KlassSubGraphInfo::add_subgraph_entry_field( 619 int static_field_offset, oop v, bool is_closed_archive) { 620 assert(DumpSharedSpaces, "dump time only"); 621 if (_subgraph_entry_fields == NULL) { 622 _subgraph_entry_fields = 623 new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass); 624 } 625 _subgraph_entry_fields->append(static_field_offset); 626 _subgraph_entry_fields->append(HeapShared::append_root(v)); 627 } 628 629 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs. 630 // Only objects of boot classes can be included in sub-graph. 631 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) { 632 assert(DumpSharedSpaces, "dump time only"); 633 Klass* relocated_k = ArchiveBuilder::get_relocated_klass(orig_k); 634 635 if (_subgraph_object_klasses == NULL) { 636 _subgraph_object_klasses = 637 new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass); 638 } 639 640 assert(ArchiveBuilder::current()->is_in_buffer_space(relocated_k), "must be a shared class"); 641 642 if (_k == relocated_k) { 643 // Don't add the Klass containing the sub-graph to it's own klass 644 // initialization list. 
645 return; 646 } 647 648 if (relocated_k->is_instance_klass()) { 649 assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(), 650 "must be boot class"); 651 // vmClasses::xxx_klass() are not updated, need to check 652 // the original Klass* 653 if (orig_k == vmClasses::String_klass() || 654 orig_k == vmClasses::Object_klass()) { 655 // Initialized early during VM initialization. No need to be added 656 // to the sub-graph object class list. 657 return; 658 } 659 } else if (relocated_k->is_objArray_klass()) { 660 Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass(); 661 if (abk->is_instance_klass()) { 662 assert(InstanceKlass::cast(abk)->is_shared_boot_class(), 663 "must be boot class"); 664 } 665 if (relocated_k == Universe::objectArrayKlassObj()) { 666 // Initialized early during Universe::genesis. No need to be added 667 // to the list. 668 return; 669 } 670 } else { 671 assert(relocated_k->is_typeArray_klass(), "must be"); 672 // Primitive type arrays are created early during Universe::genesis. 673 return; 674 } 675 676 if (log_is_enabled(Debug, cds, heap)) { 677 if (!_subgraph_object_klasses->contains(relocated_k)) { 678 ResourceMark rm; 679 log_debug(cds, heap)("Adding klass %s", orig_k->external_name()); 680 } 681 } 682 683 _subgraph_object_klasses->append_if_missing(relocated_k); 684 _has_non_early_klasses |= is_non_early_klass(orig_k); 685 } 686 687 bool KlassSubGraphInfo::is_non_early_klass(Klass* k) { 688 if (k->is_objArray_klass()) { 689 k = ObjArrayKlass::cast(k)->bottom_klass(); 690 } 691 if (k->is_instance_klass()) { 692 if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) { 693 ResourceMark rm; 694 log_info(cds, heap)("non-early: %s", k->external_name()); 695 return true; 696 } else { 697 return false; 698 } 699 } else { 700 return false; 701 } 702 } 703 704 // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo. 
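// The record is allocated in the RO region and stores:
//   - _entry_field_records: (static field offset, root index) pairs, one pair per entry field
//   - _subgraph_object_klasses: the Klass*es that must be resolved/initialized before the
//     subgraph can be handed out at runtime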
705 void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) { 706 _k = info->klass(); 707 _entry_field_records = NULL; 708 _subgraph_object_klasses = NULL; 709 _is_full_module_graph = info->is_full_module_graph(); 710 711 if (_is_full_module_graph) { 712 // Consider all classes referenced by the full module graph as early -- we will be 713 // allocating objects of these classes during JVMTI early phase, so they cannot 714 // be processed by (non-early) JVMTI ClassFileLoadHook 715 _has_non_early_klasses = false; 716 } else { 717 _has_non_early_klasses = info->has_non_early_klasses(); 718 } 719 720 if (_has_non_early_klasses) { 721 ResourceMark rm; 722 log_info(cds, heap)( 723 "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled", 724 _k->external_name()); 725 } 726 727 // populate the entry fields 728 GrowableArray<int>* entry_fields = info->subgraph_entry_fields(); 729 if (entry_fields != NULL) { 730 int num_entry_fields = entry_fields->length(); 731 assert(num_entry_fields % 2 == 0, "sanity"); 732 _entry_field_records = 733 ArchiveBuilder::new_ro_array<int>(num_entry_fields); 734 for (int i = 0 ; i < num_entry_fields; i++) { 735 _entry_field_records->at_put(i, entry_fields->at(i)); 736 } 737 } 738 739 // the Klasses of the objects in the sub-graphs 740 GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses(); 741 if (subgraph_object_klasses != NULL) { 742 int num_subgraphs_klasses = subgraph_object_klasses->length(); 743 _subgraph_object_klasses = 744 ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses); 745 for (int i = 0; i < num_subgraphs_klasses; i++) { 746 Klass* subgraph_k = subgraph_object_klasses->at(i); 747 if (log_is_enabled(Info, cds, heap)) { 748 ResourceMark rm; 749 log_info(cds, heap)( 750 "Archived object klass %s (%2d) => %s", 751 _k->external_name(), i, subgraph_k->external_name()); 752 } 753 _subgraph_object_klasses->at_put(i, subgraph_k); 754 ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i)); 755 } 756 } 757 758 ArchivePtrMarker::mark_pointer(&_k); 759 ArchivePtrMarker::mark_pointer(&_entry_field_records); 760 ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses); 761 } 762 763 struct CopyKlassSubGraphInfoToArchive : StackObj { 764 CompactHashtableWriter* _writer; 765 CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {} 766 767 bool do_entry(Klass* klass, KlassSubGraphInfo& info) { 768 if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) { 769 ArchivedKlassSubGraphInfoRecord* record = 770 (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord)); 771 record->init(&info); 772 773 Klass* relocated_k = ArchiveBuilder::get_relocated_klass(klass); 774 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)relocated_k); 775 u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record); 776 _writer->add(hash, delta); 777 } 778 return true; // keep on iterating 779 } 780 }; 781 782 // Build the records of archived subgraph infos, which include: 783 // - Entry points to all subgraphs from the containing class mirror. The entry 784 // points are static fields in the mirror. For each entry point, the field 785 // offset, value and is_closed_archive flag are recorded in the sub-graph 786 // info. The value is stored back to the corresponding field at runtime. 
// - A list of klasses that need to be loaded/initialized before the archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);
  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

void HeapShared::serialize(SerializeClosure* soc) {
  oop roots_oop = NULL;

  if (soc->reading()) {
    soc->do_oop(&roots_oop); // read from archive
    assert(oopDesc::is_oop_or_null(roots_oop), "is oop");
    // Create an OopHandle only if we have actually mapped or loaded the roots
    if (roots_oop != NULL) {
      assert(HeapShared::is_fully_available(), "must be");
      _roots = OopHandle(Universe::vm_global(), roots_oop);
    }
  } else {
    // writing
    roots_oop = roots();
    soc->do_oop(&roots_oop); // write to archive
  }

  _run_time_subgraph_info_table.serialize_header(soc);
}

static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 forces a GC to happen at such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC, true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
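// Note that resolve_classes() only resolves the classes used by the archived subgraphs; it
// does not run their static initializers. That happens later, when
// initialize_from_archived_subgraph() is called with do_init=true (see resolve_or_init()).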
855 void HeapShared::resolve_classes(JavaThread* THREAD) { 856 if (!is_fully_available()) { 857 return; // nothing to do 858 } 859 resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields, 860 num_closed_archive_subgraph_entry_fields, 861 THREAD); 862 resolve_classes_for_subgraphs(open_archive_subgraph_entry_fields, 863 num_open_archive_subgraph_entry_fields, 864 THREAD); 865 resolve_classes_for_subgraphs(fmg_open_archive_subgraph_entry_fields, 866 num_fmg_open_archive_subgraph_entry_fields, 867 THREAD); 868 } 869 870 void HeapShared::resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[], 871 int num, JavaThread* THREAD) { 872 for (int i = 0; i < num; i++) { 873 ArchivableStaticFieldInfo* info = &fields[i]; 874 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name); 875 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name); 876 assert(k != NULL && k->is_shared_boot_class(), "sanity"); 877 resolve_classes_for_subgraph_of(k, THREAD); 878 } 879 } 880 881 void HeapShared::resolve_classes_for_subgraph_of(Klass* k, JavaThread* THREAD) { 882 ExceptionMark em(THREAD); 883 const ArchivedKlassSubGraphInfoRecord* record = 884 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD); 885 if (HAS_PENDING_EXCEPTION) { 886 CLEAR_PENDING_EXCEPTION; 887 } 888 if (record == NULL) { 889 clear_archived_roots_of(k); 890 } 891 } 892 893 void HeapShared::initialize_from_archived_subgraph(Klass* k, JavaThread* THREAD) { 894 if (!is_fully_available()) { 895 return; // nothing to do 896 } 897 898 ExceptionMark em(THREAD); 899 const ArchivedKlassSubGraphInfoRecord* record = 900 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD); 901 902 if (HAS_PENDING_EXCEPTION) { 903 CLEAR_PENDING_EXCEPTION; 904 // None of the field value will be set if there was an exception when initializing the classes. 905 // The java code will not see any of the archived objects in the 906 // subgraphs referenced from k in this case. 907 return; 908 } 909 910 if (record != NULL) { 911 init_archived_fields_for(k, record); 912 } 913 } 914 915 const ArchivedKlassSubGraphInfoRecord* 916 HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) { 917 assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces"); 918 919 if (!k->is_shared()) { 920 return NULL; 921 } 922 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k); 923 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0); 924 925 // Initialize from archived data. Currently this is done only 926 // during VM initialization time. No lock is needed. 927 if (record != NULL) { 928 if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) { 929 if (log_is_enabled(Info, cds, heap)) { 930 ResourceMark rm(THREAD); 931 log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled", 932 k->external_name()); 933 } 934 return NULL; 935 } 936 937 if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) { 938 if (log_is_enabled(Info, cds, heap)) { 939 ResourceMark rm(THREAD); 940 log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled", 941 k->external_name()); 942 } 943 return NULL; 944 } 945 946 resolve_or_init(k, do_init, CHECK_NULL); 947 948 // Load/link/initialize the klasses of the objects in the subgraph. 949 // NULL class loader is used. 
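    // If any of these klasses has been replaced (e.g., by a JVMTI ClassFileLoadHook agent)
    // it will no longer be shared, and we return NULL so the caller clears or skips this
    // subgraph.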
950 Array<Klass*>* klasses = record->subgraph_object_klasses(); 951 if (klasses != NULL) { 952 for (int i = 0; i < klasses->length(); i++) { 953 Klass* klass = klasses->at(i); 954 if (!klass->is_shared()) { 955 return NULL; 956 } 957 resolve_or_init(klass, do_init, CHECK_NULL); 958 } 959 } 960 } 961 962 return record; 963 } 964 965 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) { 966 if (!do_init) { 967 if (k->class_loader_data() == NULL) { 968 Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK); 969 assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook"); 970 } 971 } else { 972 assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes"); 973 if (k->is_instance_klass()) { 974 InstanceKlass* ik = InstanceKlass::cast(k); 975 ik->initialize(CHECK); 976 } else if (k->is_objArray_klass()) { 977 ObjArrayKlass* oak = ObjArrayKlass::cast(k); 978 oak->initialize(CHECK); 979 } 980 } 981 } 982 983 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) { 984 verify_the_heap(k, "before"); 985 986 // Load the subgraph entry fields from the record and store them back to 987 // the corresponding fields within the mirror. 988 oop m = k->java_mirror(); 989 Array<int>* entry_field_records = record->entry_field_records(); 990 if (entry_field_records != NULL) { 991 int efr_len = entry_field_records->length(); 992 assert(efr_len % 2 == 0, "sanity"); 993 for (int i = 0; i < efr_len; i += 2) { 994 int field_offset = entry_field_records->at(i); 995 int root_index = entry_field_records->at(i+1); 996 oop v = get_root(root_index, /*clear=*/true); 997 m->obj_field_put(field_offset, v); 998 log_debug(cds, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v)); 999 } 1000 1001 // Done. Java code can see the archived sub-graphs referenced from k's 1002 // mirror after this point. 1003 if (log_is_enabled(Info, cds, heap)) { 1004 ResourceMark rm; 1005 log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s", 1006 k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? 
" (early)" : ""); 1007 } 1008 } 1009 1010 verify_the_heap(k, "after "); 1011 } 1012 1013 void HeapShared::clear_archived_roots_of(Klass* k) { 1014 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k); 1015 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0); 1016 if (record != NULL) { 1017 Array<int>* entry_field_records = record->entry_field_records(); 1018 if (entry_field_records != NULL) { 1019 int efr_len = entry_field_records->length(); 1020 assert(efr_len % 2 == 0, "sanity"); 1021 for (int i = 0; i < efr_len; i += 2) { 1022 int root_index = entry_field_records->at(i+1); 1023 clear_root(root_index); 1024 } 1025 } 1026 } 1027 } 1028 1029 class WalkOopAndArchiveClosure: public BasicOopIterateClosure { 1030 int _level; 1031 bool _is_closed_archive; 1032 bool _record_klasses_only; 1033 KlassSubGraphInfo* _subgraph_info; 1034 oop _orig_referencing_obj; 1035 oop _archived_referencing_obj; 1036 1037 // The following are for maintaining a stack for determining 1038 // CachedOopInfo::_referrer 1039 static WalkOopAndArchiveClosure* _current; 1040 WalkOopAndArchiveClosure* _last; 1041 public: 1042 WalkOopAndArchiveClosure(int level, 1043 bool is_closed_archive, 1044 bool record_klasses_only, 1045 KlassSubGraphInfo* subgraph_info, 1046 oop orig, oop archived) : 1047 _level(level), _is_closed_archive(is_closed_archive), 1048 _record_klasses_only(record_klasses_only), 1049 _subgraph_info(subgraph_info), 1050 _orig_referencing_obj(orig), _archived_referencing_obj(archived) { 1051 _last = _current; 1052 _current = this; 1053 } 1054 ~WalkOopAndArchiveClosure() { 1055 _current = _last; 1056 } 1057 void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 1058 void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 1059 1060 protected: 1061 template <class T> void do_oop_work(T *p) { 1062 oop obj = RawAccess<>::oop_load(p); 1063 if (!CompressedOops::is_null(obj)) { 1064 assert(!HeapShared::is_archived_object_during_dumptime(obj), 1065 "original objects must not point to archived objects"); 1066 1067 size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char)); 1068 T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta); 1069 1070 if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) { 1071 ResourceMark rm; 1072 log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level, 1073 _orig_referencing_obj->klass()->external_name(), field_delta, 1074 p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name()); 1075 LogTarget(Trace, cds, heap) log; 1076 LogStream out(log); 1077 obj->print_on(&out); 1078 } 1079 1080 oop archived = HeapShared::archive_reachable_objects_from( 1081 _level + 1, _subgraph_info, obj, _is_closed_archive); 1082 assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1"); 1083 assert(HeapShared::is_archived_object_during_dumptime(archived), "must be"); 1084 1085 if (!_record_klasses_only) { 1086 // Update the reference in the archived copy of the referencing object. 
1087 log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT, 1088 _level, p2i(new_p), p2i(obj), p2i(archived)); 1089 RawAccess<IS_NOT_NULL>::oop_store(new_p, archived); 1090 } 1091 } 1092 } 1093 1094 public: 1095 static WalkOopAndArchiveClosure* current() { return _current; } 1096 oop orig_referencing_obj() { return _orig_referencing_obj; } 1097 KlassSubGraphInfo* subgraph_info() { return _subgraph_info; } 1098 }; 1099 1100 WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = NULL; 1101 1102 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop orig_obj) { 1103 CachedOopInfo info; 1104 WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current(); 1105 1106 info._subgraph_info = (walker == NULL) ? NULL : walker->subgraph_info(); 1107 info._referrer = (walker == NULL) ? NULL : walker->orig_referencing_obj(); 1108 info._obj = orig_obj; 1109 1110 return info; 1111 } 1112 1113 void HeapShared::check_closed_region_object(InstanceKlass* k) { 1114 // Check fields in the object 1115 for (JavaFieldStream fs(k); !fs.done(); fs.next()) { 1116 if (!fs.access_flags().is_static()) { 1117 BasicType ft = fs.field_descriptor().field_type(); 1118 if (!fs.access_flags().is_final() && is_reference_type(ft)) { 1119 ResourceMark rm; 1120 log_warning(cds, heap)( 1121 "Please check reference field in %s instance in closed archive heap region: %s %s", 1122 k->external_name(), (fs.name())->as_C_string(), 1123 (fs.signature())->as_C_string()); 1124 } 1125 } 1126 } 1127 } 1128 1129 void HeapShared::check_module_oop(oop orig_module_obj) { 1130 assert(DumpSharedSpaces, "must be"); 1131 assert(java_lang_Module::is_instance(orig_module_obj), "must be"); 1132 ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj); 1133 if (orig_module_ent == NULL) { 1134 // These special Module objects are created in Java code. They are not 1135 // defined via Modules::define_module(), so they don't have a ModuleEntry: 1136 // java.lang.Module::ALL_UNNAMED_MODULE 1137 // java.lang.Module::EVERYONE_MODULE 1138 // jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule 1139 assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed"); 1140 log_info(cds, heap)("Module oop with No ModuleEntry* @[" PTR_FORMAT "]", p2i(orig_module_obj)); 1141 } else { 1142 ClassLoaderData* loader_data = orig_module_ent->loader_data(); 1143 assert(loader_data->is_builtin_class_loader_data(), "must be"); 1144 } 1145 } 1146 1147 1148 // (1) If orig_obj has not been archived yet, archive it. 1149 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called), 1150 // trace all objects that are reachable from it, and make sure these objects are archived. 1151 // (3) Record the klasses of all orig_obj and all reachable objects. 1152 oop HeapShared::archive_reachable_objects_from(int level, 1153 KlassSubGraphInfo* subgraph_info, 1154 oop orig_obj, 1155 bool is_closed_archive) { 1156 assert(orig_obj != NULL, "must be"); 1157 assert(!is_archived_object_during_dumptime(orig_obj), "sanity"); 1158 1159 if (!JavaClasses::is_supported_for_archiving(orig_obj)) { 1160 // This object has injected fields that cannot be supported easily, so we disallow them for now. 1161 // If you get an error here, you probably made a change in the JDK library that has added 1162 // these objects that are referenced (directly or indirectly) by static fields. 
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    vm_direct_exit(1);
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_direct_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_object(orig_obj);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_direct_exit(1);
      }
    }

    if (java_lang_Module::is_instance(orig_obj)) {
      check_module_oop(orig_obj);
      java_lang_Module::set_module_entry(archived_obj, NULL);
      java_lang_Module::set_loader(archived_obj, NULL);
    } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
      // class_data will be restored explicitly at run time.
1221 guarantee(orig_obj == SystemDictionary::java_platform_loader() || 1222 orig_obj == SystemDictionary::java_system_loader() || 1223 java_lang_ClassLoader::loader_data(orig_obj) == NULL, "must be"); 1224 java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL); 1225 } 1226 } 1227 1228 assert(archived_obj != NULL, "must be"); 1229 Klass *orig_k = orig_obj->klass(); 1230 subgraph_info->add_subgraph_object_klass(orig_k); 1231 1232 WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only, 1233 subgraph_info, orig_obj, archived_obj); 1234 orig_obj->oop_iterate(&walker); 1235 if (is_closed_archive && orig_k->is_instance_klass()) { 1236 check_closed_region_object(InstanceKlass::cast(orig_k)); 1237 } 1238 1239 check_enum_obj(level + 1, subgraph_info, orig_obj, is_closed_archive); 1240 return archived_obj; 1241 } 1242 1243 // 1244 // Start from the given static field in a java mirror and archive the 1245 // complete sub-graph of java heap objects that are reached directly 1246 // or indirectly from the starting object by following references. 1247 // Sub-graph archiving restrictions (current): 1248 // 1249 // - All classes of objects in the archived sub-graph (including the 1250 // entry class) must be boot class only. 1251 // - No java.lang.Class instance (java mirror) can be included inside 1252 // an archived sub-graph. Mirror can only be the sub-graph entry object. 1253 // 1254 // The Java heap object sub-graph archiving process (see 1255 // WalkOopAndArchiveClosure): 1256 // 1257 // 1) Java object sub-graph archiving starts from a given static field 1258 // within a Class instance (java mirror). If the static field is a 1259 // reference field and points to a non-null java object, proceed to 1260 // the next step. 1261 // 1262 // 2) Archives the referenced java object. If an archived copy of the 1263 // current object already exists, updates the pointer in the archived 1264 // copy of the referencing object to point to the current archived object. 1265 // Otherwise, proceed to the next step. 1266 // 1267 // 3) Follows all references within the current java object and recursively 1268 // archive the sub-graph of objects starting from each reference. 1269 // 1270 // 4) Updates the pointer in the archived copy of referencing object to 1271 // point to the current archived object. 1272 // 1273 // 5) The Klass of the current java object is added to the list of Klasses 1274 // for loading and initializing before any object in the archived graph can 1275 // be accessed at runtime. 
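//
// For example (illustrative only): archiving java/lang/Integer$IntegerCache::archivedCache
// (an Integer[]) copies the array and every Integer it references into the archive region,
// records the field offset plus a root index as a subgraph entry point, and adds the
// Integer and Integer[] klasses to the subgraph's klass list.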
1276 // 1277 void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k, 1278 const char* klass_name, 1279 int field_offset, 1280 const char* field_name, 1281 bool is_closed_archive) { 1282 assert(DumpSharedSpaces, "dump time only"); 1283 assert(k->is_shared_boot_class(), "must be boot class"); 1284 1285 oop m = k->java_mirror(); 1286 1287 KlassSubGraphInfo* subgraph_info = get_subgraph_info(k); 1288 oop f = m->obj_field(field_offset); 1289 1290 log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f)); 1291 1292 if (!CompressedOops::is_null(f)) { 1293 if (log_is_enabled(Trace, cds, heap)) { 1294 LogTarget(Trace, cds, heap) log; 1295 LogStream out(log); 1296 f->print_on(&out); 1297 } 1298 1299 oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive); 1300 1301 if (af == NULL) { 1302 log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)", 1303 klass_name, field_name); 1304 } else { 1305 // Note: the field value is not preserved in the archived mirror. 1306 // Record the field as a new subGraph entry point. The recorded 1307 // information is restored from the archive at runtime. 1308 subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive); 1309 log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af)); 1310 } 1311 } else { 1312 // The field contains null, we still need to record the entry point, 1313 // so it can be restored at runtime. 1314 subgraph_info->add_subgraph_entry_field(field_offset, NULL, false); 1315 } 1316 } 1317 1318 #ifndef PRODUCT 1319 class VerifySharedOopClosure: public BasicOopIterateClosure { 1320 private: 1321 bool _is_archived; 1322 1323 public: 1324 VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {} 1325 1326 void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); } 1327 void do_oop( oop *p) { VerifySharedOopClosure::do_oop_work(p); } 1328 1329 protected: 1330 template <class T> void do_oop_work(T *p) { 1331 oop obj = RawAccess<>::oop_load(p); 1332 if (!CompressedOops::is_null(obj)) { 1333 HeapShared::verify_reachable_objects_from(obj, _is_archived); 1334 } 1335 } 1336 }; 1337 1338 void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) { 1339 assert(DumpSharedSpaces, "dump time only"); 1340 assert(k->is_shared_boot_class(), "must be boot class"); 1341 1342 oop m = k->java_mirror(); 1343 oop f = m->obj_field(field_offset); 1344 if (!CompressedOops::is_null(f)) { 1345 verify_subgraph_from(f); 1346 } 1347 } 1348 1349 void HeapShared::verify_subgraph_from(oop orig_obj) { 1350 oop archived_obj = find_archived_heap_object(orig_obj); 1351 if (archived_obj == NULL) { 1352 // It's OK for the root of a subgraph to be not archived. See comments in 1353 // archive_reachable_objects_from(). 1354 return; 1355 } 1356 1357 // Verify that all objects reachable from orig_obj are archived. 1358 init_seen_objects_table(); 1359 verify_reachable_objects_from(orig_obj, false); 1360 delete_seen_objects_table(); 1361 1362 // Note: we could also verify that all objects reachable from the archived 1363 // copy of orig_obj can only point to archived objects, with: 1364 // init_seen_objects_table(); 1365 // verify_reachable_objects_from(archived_obj, true); 1366 // init_seen_objects_table(); 1367 // but that's already done in G1HeapVerifier::verify_archive_regions so we 1368 // won't do it here. 
1369 } 1370 1371 void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) { 1372 _num_total_verifications ++; 1373 if (!has_been_seen_during_subgraph_recording(obj)) { 1374 set_has_been_seen_during_subgraph_recording(obj); 1375 1376 if (is_archived) { 1377 assert(is_archived_object_during_dumptime(obj), "must be"); 1378 assert(find_archived_heap_object(obj) == NULL, "must be"); 1379 } else { 1380 assert(!is_archived_object_during_dumptime(obj), "must be"); 1381 assert(find_archived_heap_object(obj) != NULL, "must be"); 1382 } 1383 1384 VerifySharedOopClosure walker(is_archived); 1385 obj->oop_iterate(&walker); 1386 } 1387 } 1388 #endif 1389 1390 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL; 1391 int HeapShared::_num_new_walked_objs; 1392 int HeapShared::_num_new_archived_objs; 1393 int HeapShared::_num_old_recorded_klasses; 1394 1395 int HeapShared::_num_total_subgraph_recordings = 0; 1396 int HeapShared::_num_total_walked_objs = 0; 1397 int HeapShared::_num_total_archived_objs = 0; 1398 int HeapShared::_num_total_recorded_klasses = 0; 1399 int HeapShared::_num_total_verifications = 0; 1400 1401 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) { 1402 return _seen_objects_table->get(obj) != NULL; 1403 } 1404 1405 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) { 1406 assert(!has_been_seen_during_subgraph_recording(obj), "sanity"); 1407 _seen_objects_table->put(obj, true); 1408 ++ _num_new_walked_objs; 1409 } 1410 1411 void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) { 1412 log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name); 1413 init_subgraph_info(k, is_full_module_graph); 1414 init_seen_objects_table(); 1415 _num_new_walked_objs = 0; 1416 _num_new_archived_objs = 0; 1417 _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses(); 1418 } 1419 1420 void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) { 1421 int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() - 1422 _num_old_recorded_klasses; 1423 log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: " 1424 "walked %d objs, archived %d new objs, recorded %d classes", 1425 class_name, _num_new_walked_objs, _num_new_archived_objs, 1426 num_new_recorded_klasses); 1427 1428 delete_seen_objects_table(); 1429 1430 _num_total_subgraph_recordings ++; 1431 _num_total_walked_objs += _num_new_walked_objs; 1432 _num_total_archived_objs += _num_new_archived_objs; 1433 _num_total_recorded_klasses += num_new_recorded_klasses; 1434 } 1435 1436 class ArchivableStaticFieldFinder: public FieldClosure { 1437 InstanceKlass* _ik; 1438 Symbol* _field_name; 1439 bool _found; 1440 int _offset; 1441 public: 1442 ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) : 1443 _ik(ik), _field_name(field_name), _found(false), _offset(-1) {} 1444 1445 virtual void do_field(fieldDescriptor* fd) { 1446 if (fd->name() == _field_name) { 1447 assert(!_found, "fields cannot be overloaded"); 1448 assert(is_reference_type(fd->field_type()), "can archive only fields that are references"); 1449 _found = true; 1450 _offset = fd->offset(); 1451 } 1452 } 1453 bool found() { return _found; } 1454 int offset() { return _offset; } 1455 }; 1456 1457 void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[], 1458 int num, TRAPS) { 1459 for (int i = 0; i < num; i++) { 1460 
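    // For each entry: resolve and initialize the named boot class, then locate the static
    // field and record its offset so the field's value can be archived later.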
void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            int num, TRAPS) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);

    Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, CHECK);
    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(),
           "Only support boot classes");
    ik->initialize(CHECK);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(TRAPS) {
  assert(HeapShared::can_write(), "must be");
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();
  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
                             num_closed_archive_subgraph_entry_fields,
                             CHECK);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
                             num_open_archive_subgraph_entry_fields,
                             CHECK);
  if (MetaspaceShared::use_full_module_graph()) {
    init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields,
                               num_fmg_open_archive_subgraph_entry_fields,
                               CHECK);
  }
}

void HeapShared::init_for_dumping(TRAPS) {
  if (HeapShared::can_write()) {
    _dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
    init_subgraph_entry_fields(CHECK);
  }
}

"closed" : "open", 1542 _num_total_subgraph_recordings); 1543 log_info(cds, heap)(" Walked %d objects", _num_total_walked_objs); 1544 log_info(cds, heap)(" Archived %d objects", _num_total_archived_objs); 1545 log_info(cds, heap)(" Recorded %d klasses", _num_total_recorded_klasses); 1546 1547 #ifndef PRODUCT 1548 for (int i = 0; i < num; i++) { 1549 ArchivableStaticFieldInfo* f = &fields[i]; 1550 verify_subgraph_from_static_field(f->klass, f->offset); 1551 } 1552 log_info(cds, heap)(" Verified %d references", _num_total_verifications); 1553 #endif 1554 } 1555 1556 // Not all the strings in the global StringTable are dumped into the archive, because 1557 // some of those strings may be only referenced by classes that are excluded from 1558 // the archive. We need to explicitly mark the strings that are: 1559 // [1] used by classes that WILL be archived; 1560 // [2] included in the SharedArchiveConfigFile. 1561 void HeapShared::add_to_dumped_interned_strings(oop string) { 1562 assert_at_safepoint(); // DumpedInternedStrings uses raw oops 1563 bool created; 1564 _dumped_interned_strings->put_if_absent(string, true, &created); 1565 } 1566 1567 // At dump-time, find the location of all the non-null oop pointers in an archived heap 1568 // region. This way we can quickly relocate all the pointers without using 1569 // BasicOopIterateClosure at runtime. 1570 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure { 1571 void* _start; 1572 BitMap *_oopmap; 1573 int _num_total_oops; 1574 int _num_null_oops; 1575 public: 1576 FindEmbeddedNonNullPointers(void* start, BitMap* oopmap) 1577 : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {} 1578 1579 virtual void do_oop(narrowOop* p) { 1580 _num_total_oops ++; 1581 narrowOop v = *p; 1582 if (!CompressedOops::is_null(v)) { 1583 size_t idx = p - (narrowOop*)_start; 1584 _oopmap->set_bit(idx); 1585 } else { 1586 _num_null_oops ++; 1587 } 1588 } 1589 virtual void do_oop(oop* p) { 1590 _num_total_oops ++; 1591 if ((*p) != NULL) { 1592 size_t idx = p - (oop*)_start; 1593 _oopmap->set_bit(idx); 1594 } else { 1595 _num_null_oops ++; 1596 } 1597 } 1598 int num_total_oops() const { return _num_total_oops; } 1599 int num_null_oops() const { return _num_null_oops; } 1600 }; 1601 1602 ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) { 1603 size_t num_bits = region.byte_size() / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)); 1604 ResourceBitMap oopmap(num_bits); 1605 1606 HeapWord* p = region.start(); 1607 HeapWord* end = region.end(); 1608 FindEmbeddedNonNullPointers finder((void*)p, &oopmap); 1609 ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL; 1610 1611 int num_objs = 0; 1612 while (p < end) { 1613 oop o = cast_to_oop(p); 1614 o->oop_iterate(&finder); 1615 p += o->size(); 1616 if (DumpSharedSpaces) { 1617 builder->relocate_klass_ptr(o); 1618 } 1619 ++ num_objs; 1620 } 1621 1622 log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d", 1623 num_objs, finder.num_total_oops(), finder.num_null_oops()); 1624 return oopmap; 1625 } 1626 1627 // Patch all the embedded oop pointers inside an archived heap region, 1628 // to be consistent with the runtime oop encoding. 
// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchCompressedEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

class PatchUncompressedEmbeddedPointers: public BitMapClosure {
  oop* _start;

 public:
  PatchUncompressedEmbeddedPointers(oop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    oop* p = _start + offset;
    intptr_t dumptime_oop = (intptr_t)((void*)*p);
    assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
    intptr_t runtime_oop = dumptime_oop + HeapShared::runtime_delta();
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
    return true;
  }
};

// Patch all the non-null pointers that are embedded in the archived heap objects
// in this region
void HeapShared::patch_embedded_pointers(MemRegion region, address oopmap,
                                         size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  if (UseCompressedOops) {
    PatchCompressedEmbeddedPointers patcher((narrowOop*)region.start());
    bm.iterate(&patcher);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)region.start());
    bm.iterate(&patcher);
  }
}

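// patch_embedded_pointers() above rewrites each slot in place, re-deriving the
// slot address from the oopmap (slot = region.start() + bit_index); in
// non-product builds the oopmap is recomputed from the objects and
// cross-checked first. The "loaded heap" support below performs the analogous
// patching with PatchLoadedRegionPointers, after the archived regions have
// been copied into a buffer allocated from the runtime heap.
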
// The CDS archive remembers each heap object by its address at dump time, but
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses.
struct LoadedArchiveHeapRegion {
  int       _region_index;   // index for FileMapInfo::space_at(index)
  size_t    _region_size;    // number of bytes in this region
  uintptr_t _dumptime_base;  // The dump-time (decoded) address of the first object in this region
  intx      _runtime_offset; // If an object's dump time address P is within this region, its
                             // runtime address is P + _runtime_offset

  static int comparator(const void* a, const void* b) {
    LoadedArchiveHeapRegion* reg_a = (LoadedArchiveHeapRegion*)a;
    LoadedArchiveHeapRegion* reg_b = (LoadedArchiveHeapRegion*)b;
    if (reg_a->_dumptime_base < reg_b->_dumptime_base) {
      return -1;
    } else if (reg_a->_dumptime_base == reg_b->_dumptime_base) {
      return 0;
    } else {
      return 1;
    }
  }

  uintptr_t top() {
    return _dumptime_base + _region_size;
  }
};

void HeapShared::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_regions,
                                             int num_loaded_regions) {
  _dumptime_base_0 = loaded_regions[0]._dumptime_base;
  _dumptime_base_1 = loaded_regions[1]._dumptime_base;
  _dumptime_base_2 = loaded_regions[2]._dumptime_base;
  _dumptime_base_3 = loaded_regions[3]._dumptime_base;
  _dumptime_top = loaded_regions[num_loaded_regions-1].top();

  _runtime_offset_0 = loaded_regions[0]._runtime_offset;
  _runtime_offset_1 = loaded_regions[1]._runtime_offset;
  _runtime_offset_2 = loaded_regions[2]._runtime_offset;
  _runtime_offset_3 = loaded_regions[3]._runtime_offset;

  assert(2 <= num_loaded_regions && num_loaded_regions <= 4, "must be");
  if (num_loaded_regions < 4) {
    _dumptime_base_3 = UINTPTR_MAX;
  }
  if (num_loaded_regions < 3) {
    _dumptime_base_2 = UINTPTR_MAX;
  }
}

bool HeapShared::can_load() {
  return Universe::heap()->can_load_archived_objects();
}

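// How a single pointer is relocated by the closure below (illustrative values
// only): suppose two regions were loaded with dump-time bases
// _base_0 = 0x7c0000000 and _base_1 = 0x7c0400000. For a decoded dump-time
// address o, do_bit() picks the highest base that is <= o (the regions are
// sorted by dump-time base in sort_loaded_regions()) and adds that region's
// _runtime_offset, yielding the object's new address inside the loaded buffer.
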
template <int NUM_LOADED_REGIONS>
class PatchLoadedRegionPointers: public BitMapClosure {
  narrowOop* _start;
  intx _offset_0;
  intx _offset_1;
  intx _offset_2;
  intx _offset_3;
  uintptr_t _base_0;
  uintptr_t _base_1;
  uintptr_t _base_2;
  uintptr_t _base_3;
  uintptr_t _top;

  static_assert(MetaspaceShared::max_num_heap_regions == 4, "can't handle more than 4 regions");
  static_assert(NUM_LOADED_REGIONS >= 2, "we have at least 2 loaded regions");
  static_assert(NUM_LOADED_REGIONS <= 4, "we have at most 4 loaded regions");

 public:
  PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_regions)
    : _start(start),
      _offset_0(loaded_regions[0]._runtime_offset),
      _offset_1(loaded_regions[1]._runtime_offset),
      _offset_2(loaded_regions[2]._runtime_offset),
      _offset_3(loaded_regions[3]._runtime_offset),
      _base_0(loaded_regions[0]._dumptime_base),
      _base_1(loaded_regions[1]._dumptime_base),
      _base_2(loaded_regions[2]._dumptime_base),
      _base_3(loaded_regions[3]._dumptime_base) {
    _top = loaded_regions[NUM_LOADED_REGIONS-1].top();
  }

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    uintptr_t o = cast_from_oop<uintptr_t>(HeapShared::decode_from_archive(v));
    assert(_base_0 <= o && o < _top, "must be");

    // We usually have only 2 regions for the default archive. Use template to avoid unnecessary comparisons.
    if (NUM_LOADED_REGIONS > 3 && o >= _base_3) {
      o += _offset_3;
    } else if (NUM_LOADED_REGIONS > 2 && o >= _base_2) {
      o += _offset_2;
    } else if (o >= _base_1) {
      o += _offset_1;
    } else {
      o += _offset_0;
    }
    HeapShared::assert_in_loaded_heap(o);
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
    return true;
  }
};

int HeapShared::init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
                                    MemRegion& archive_space) {
  size_t total_bytes = 0;
  int num_loaded_regions = 0;
  for (int i = MetaspaceShared::first_archive_heap_region;
       i <= MetaspaceShared::last_archive_heap_region; i++) {
    FileMapRegion* r = mapinfo->space_at(i);
    r->assert_is_heap_region();
    if (r->used() > 0) {
      assert(is_aligned(r->used(), HeapWordSize), "must be");
      total_bytes += r->used();
      LoadedArchiveHeapRegion* ri = &loaded_regions[num_loaded_regions++];
      ri->_region_index = i;
      ri->_region_size = r->used();
      ri->_dumptime_base = (uintptr_t)mapinfo->start_address_as_decoded_from_archive(r);
    }
  }

  assert(is_aligned(total_bytes, HeapWordSize), "must be");
  size_t word_size = total_bytes / HeapWordSize;
  HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
  if (buffer == nullptr) {
    return 0;
  }

  archive_space = MemRegion(buffer, word_size);
  _loaded_heap_bottom = (uintptr_t)archive_space.start();
  _loaded_heap_top    = _loaded_heap_bottom + total_bytes;

  return num_loaded_regions;
}

void HeapShared::sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions,
                                     uintptr_t buffer) {
  // Find the relocation offset of the pointers in each region
  qsort(loaded_regions, num_loaded_regions, sizeof(LoadedArchiveHeapRegion),
        LoadedArchiveHeapRegion::comparator);

  uintptr_t p = buffer;
  for (int i = 0; i < num_loaded_regions; i++) {
    // This region will be loaded at p, so all objects inside this
    // region will be shifted by ri->offset
    LoadedArchiveHeapRegion* ri = &loaded_regions[i];
    ri->_runtime_offset = p - ri->_dumptime_base;
    p += ri->_region_size;
  }
  assert(p == _loaded_heap_top, "must be");
}

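// Example of the layout computed above (illustrative names and sizes): with
// two loaded regions of 4 MB and 2 MB whose sorted dump-time bases are base_a
// and base_b, the first region is placed at 'buffer' and gets
// _runtime_offset = buffer - base_a, the second is placed at buffer + 4 MB and
// gets _runtime_offset = (buffer + 4 MB) - base_b. The regions end up
// contiguous in the loaded buffer regardless of any gaps between them at dump
// time.
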
bool HeapShared::load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
                              int num_loaded_regions, uintptr_t buffer) {
  uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
  if (bitmap_base == 0) {
    _loading_failed = true;
    return false; // OOM or CRC error
  }
  uintptr_t load_address = buffer;
  for (int i = 0; i < num_loaded_regions; i++) {
    LoadedArchiveHeapRegion* ri = &loaded_regions[i];
    FileMapRegion* r = mapinfo->space_at(ri->_region_index);

    if (!mapinfo->read_region(ri->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
      // There's no easy way to free the buffer, so we will fill it with zero later
      // in fill_failed_loaded_region(), and it will eventually be GC'ed.
      log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", i);
      _loading_failed = true;
      return false;
    }
    log_info(cds)("Loaded heap region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
                  " size " SIZE_FORMAT_W(6) " delta " INTX_FORMAT,
                  ri->_region_index, load_address, load_address + ri->_region_size,
                  ri->_region_size, ri->_runtime_offset);

    uintptr_t oopmap = bitmap_base + r->oopmap_offset();
    BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());

    if (num_loaded_regions == 4) {
      PatchLoadedRegionPointers<4> patcher((narrowOop*)load_address, loaded_regions);
      bm.iterate(&patcher);
    } else if (num_loaded_regions == 3) {
      PatchLoadedRegionPointers<3> patcher((narrowOop*)load_address, loaded_regions);
      bm.iterate(&patcher);
    } else {
      assert(num_loaded_regions == 2, "must be");
      PatchLoadedRegionPointers<2> patcher((narrowOop*)load_address, loaded_regions);
      bm.iterate(&patcher);
    }

    load_address += r->used();
  }

  return true;
}

bool HeapShared::load_heap_regions(FileMapInfo* mapinfo) {
  init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());

  LoadedArchiveHeapRegion loaded_regions[MetaspaceShared::max_num_heap_regions];
  memset(loaded_regions, 0, sizeof(loaded_regions));

  MemRegion archive_space;
  int num_loaded_regions = init_loaded_regions(mapinfo, loaded_regions, archive_space);
  if (num_loaded_regions <= 0) {
    return false;
  }
  sort_loaded_regions(loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start());
  if (!load_regions(mapinfo, loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start())) {
    assert(_loading_failed, "must be");
    return false;
  }

  init_loaded_heap_relocation(loaded_regions, num_loaded_regions);
  _is_loaded = true;

  return true;
}

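// Optional verification of the loaded heap, enabled by VerifyArchivedFields:
// finish_initialization() below first records the start address of every
// object in the loaded range in a table, then iterates all embedded pointers
// with this closure and checks that each non-null pointer lands exactly on
// one of the recorded object starts.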
class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
  ResourceHashtable<uintptr_t, bool>* _table;

 public:
  VerifyLoadedHeapEmbeddedPointers(ResourceHashtable<uintptr_t, bool>* table) : _table(table) {}

  virtual void do_oop(narrowOop* p) {
    // This should be called before the loaded regions are modified, so all the embedded pointers
    // must be NULL, or must point to a valid object in the loaded regions.
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      oop o = CompressedOops::decode_not_null(v);
      uintptr_t u = cast_from_oop<uintptr_t>(o);
      HeapShared::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived regions");
    }
  }
  virtual void do_oop(oop* p) {
    ShouldNotReachHere();
  }
};

void HeapShared::finish_initialization() {
  if (is_loaded()) {
    HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
    HeapWord* top    = (HeapWord*)_loaded_heap_top;

    MemRegion archive_space = MemRegion(bottom, top);
    Universe::heap()->complete_loaded_archive_space(archive_space);
  }

  if (VerifyArchivedFields <= 0 || !is_loaded()) {
    return;
  }

  log_info(cds, heap)("Verify all oops and pointers in loaded heap");

  ResourceMark rm;
  ResourceHashtable<uintptr_t, bool> table;
  VerifyLoadedHeapEmbeddedPointers verifier(&table);
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top    = (HeapWord*)_loaded_heap_top;

  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    table.put(cast_from_oop<uintptr_t>(o), true);
    p += o->size();
  }

  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&verifier);
    p += o->size();
  }
}

void HeapShared::fill_failed_loaded_region() {
  assert(_loading_failed, "must be");
  if (_loaded_heap_bottom != 0) {
    assert(_loaded_heap_top != 0, "must be");
    HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
    HeapWord* top    = (HeapWord*)_loaded_heap_top;
    Universe::heap()->fill_with_objects(bottom, top - bottom);
  }
}

#endif // INCLUDE_CDS_JAVA_HEAP