1 /* 2 * Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "cds/archiveBuilder.hpp" 27 #include "cds/archiveUtils.hpp" 28 #include "cds/cdsHeapVerifier.hpp" 29 #include "cds/filemap.hpp" 30 #include "cds/heapShared.inline.hpp" 31 #include "cds/metaspaceShared.hpp" 32 #include "classfile/classLoaderData.hpp" 33 #include "classfile/classLoaderDataShared.hpp" 34 #include "classfile/javaClasses.inline.hpp" 35 #include "classfile/moduleEntry.hpp" 36 #include "classfile/stringTable.hpp" 37 #include "classfile/symbolTable.hpp" 38 #include "classfile/systemDictionary.hpp" 39 #include "classfile/systemDictionaryShared.hpp" 40 #include "classfile/vmClasses.hpp" 41 #include "classfile/vmSymbols.hpp" 42 #include "gc/shared/collectedHeap.hpp" 43 #include "gc/shared/gcLocker.hpp" 44 #include "gc/shared/gcVMOperations.hpp" 45 #include "logging/log.hpp" 46 #include "logging/logStream.hpp" 47 #include "memory/iterator.inline.hpp" 48 #include "memory/metadataFactory.hpp" 49 #include "memory/metaspaceClosure.hpp" 50 #include "memory/resourceArea.hpp" 51 #include "memory/universe.hpp" 52 #include "oops/compressedOops.inline.hpp" 53 #include "oops/fieldStreams.inline.hpp" 54 #include "oops/objArrayOop.hpp" 55 #include "oops/oop.inline.hpp" 56 #include "prims/jvmtiExport.hpp" 57 #include "runtime/fieldDescriptor.inline.hpp" 58 #include "runtime/globals_extension.hpp" 59 #include "runtime/init.hpp" 60 #include "runtime/java.hpp" 61 #include "runtime/javaCalls.hpp" 62 #include "runtime/safepointVerifiers.hpp" 63 #include "utilities/bitMap.inline.hpp" 64 #include "utilities/copy.hpp" 65 #if INCLUDE_G1GC 66 #include "gc/g1/g1CollectedHeap.hpp" 67 #endif 68 69 #if INCLUDE_CDS_JAVA_HEAP 70 71 bool HeapShared::_closed_regions_mapped = false; 72 bool HeapShared::_open_regions_mapped = false; 73 bool HeapShared::_is_loaded = false; 74 bool HeapShared::_disable_writing = false; 75 address HeapShared::_narrow_oop_base; 76 int HeapShared::_narrow_oop_shift; 77 DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL; 78 79 // Support for loaded heap. 
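// (The "loaded heap" mode is used when the archived heap regions cannot be mapped
// directly at their requested address: the regions are instead copied into the runtime
// heap, and the archived oops are relocated using the per-region _dumptime_base_* /
// _runtime_offset_* values below.)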
uintptr_t HeapShared::_loaded_heap_bottom = 0;
uintptr_t HeapShared::_loaded_heap_top = 0;
uintptr_t HeapShared::_dumptime_base_0 = UINTPTR_MAX;
uintptr_t HeapShared::_dumptime_base_1 = UINTPTR_MAX;
uintptr_t HeapShared::_dumptime_base_2 = UINTPTR_MAX;
uintptr_t HeapShared::_dumptime_base_3 = UINTPTR_MAX;
uintptr_t HeapShared::_dumptime_top = 0;
intx HeapShared::_runtime_offset_0 = 0;
intx HeapShared::_runtime_offset_1 = 0;
intx HeapShared::_runtime_offset_2 = 0;
intx HeapShared::_runtime_offset_3 = 0;
bool HeapShared::_loading_failed = false;

// Support for mapped heap (!UseCompressedOops only)
ptrdiff_t HeapShared::_runtime_delta = 0;

//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  {"java/lang/Long$LongCache", "archivedCache"},
  {"java/lang/Byte$ByteCache", "archivedCache"},
  {"java/lang/Short$ShortCache", "archivedCache"},
  {"java/lang/Character$CharacterCache", "archivedCache"},
  {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale", "constantBaseLocales"},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
  {"java/util/ImmutableCollections", "archivedObjects"},
  {"java/lang/ModuleLayer", "EMPTY_LAYER"},
  {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger", "archivedCaches"},
};

// Entry fields for subgraphs archived in the open archive heap region (full module graph).
122 static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = { 123 {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"}, 124 {"jdk/internal/module/ArchivedBootLayer", "archivedBootLayer"}, 125 {"java/lang/Module$ArchivedData", "archivedData"}, 126 }; 127 128 const static int num_closed_archive_subgraph_entry_fields = 129 sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo); 130 const static int num_open_archive_subgraph_entry_fields = 131 sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo); 132 const static int num_fmg_open_archive_subgraph_entry_fields = 133 sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo); 134 135 GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL; 136 OopHandle HeapShared::_roots; 137 138 #ifdef ASSERT 139 bool HeapShared::is_archived_object_during_dumptime(oop p) { 140 assert(HeapShared::can_write(), "must be"); 141 assert(DumpSharedSpaces, "this function is only used with -Xshare:dump"); 142 return Universe::heap()->is_archived_object(p); 143 } 144 #endif 145 146 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], int num, InstanceKlass* ik) { 147 for (int i = 0; i < num; i++) { 148 if (fields[i].klass == ik) { 149 return true; 150 } 151 } 152 return false; 153 } 154 155 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) { 156 return is_subgraph_root_class_of(closed_archive_subgraph_entry_fields, 157 num_closed_archive_subgraph_entry_fields, ik) || 158 is_subgraph_root_class_of(open_archive_subgraph_entry_fields, 159 num_open_archive_subgraph_entry_fields, ik) || 160 is_subgraph_root_class_of(fmg_open_archive_subgraph_entry_fields, 161 num_fmg_open_archive_subgraph_entry_fields, ik); 162 } 163 164 void HeapShared::fixup_regions() { 165 FileMapInfo* mapinfo = FileMapInfo::current_info(); 166 if (is_mapped()) { 167 mapinfo->fixup_mapped_heap_regions(); 168 } else if (_loading_failed) { 169 fill_failed_loaded_region(); 170 } 171 if (is_fully_available()) { 172 if (!MetaspaceShared::use_full_module_graph()) { 173 // Need to remove all the archived java.lang.Module objects from HeapShared::roots(). 174 ClassLoaderDataShared::clear_archived_oops(); 175 } 176 } 177 SystemDictionaryShared::update_archived_mirror_native_pointers(); 178 } 179 180 unsigned HeapShared::oop_hash(oop const& p) { 181 // We are at a safepoint, so the object won't move. It's OK to use its 182 // address as the hashcode. 183 // We can't use p->identity_hash() as it's not available for primitive oops. 
184 assert_at_safepoint(); 185 return (unsigned)(p2i(p) >> LogBytesPerWord); 186 } 187 188 static void reset_states(oop obj, TRAPS) { 189 Handle h_obj(THREAD, obj); 190 InstanceKlass* klass = InstanceKlass::cast(obj->klass()); 191 TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates"); 192 Symbol* method_sig = vmSymbols::void_method_signature(); 193 194 while (klass != NULL) { 195 Method* method = klass->find_method(method_name, method_sig); 196 if (method != NULL) { 197 assert(method->is_private(), "must be"); 198 if (log_is_enabled(Debug, cds)) { 199 ResourceMark rm(THREAD); 200 log_debug(cds)(" calling %s", method->name_and_sig_as_C_string()); 201 } 202 JavaValue result(T_VOID); 203 JavaCalls::call_special(&result, h_obj, klass, 204 method_name, method_sig, CHECK); 205 } 206 klass = klass->java_super(); 207 } 208 } 209 210 void HeapShared::reset_archived_object_states(TRAPS) { 211 assert(DumpSharedSpaces, "dump-time only"); 212 log_debug(cds)("Resetting platform loader"); 213 reset_states(SystemDictionary::java_platform_loader(), CHECK); 214 log_debug(cds)("Resetting system loader"); 215 reset_states(SystemDictionary::java_system_loader(), CHECK); 216 } 217 218 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL; 219 oop HeapShared::find_archived_heap_object(oop obj) { 220 assert(DumpSharedSpaces, "dump-time only"); 221 ArchivedObjectCache* cache = archived_object_cache(); 222 CachedOopInfo* p = cache->get(obj); 223 if (p != NULL) { 224 return p->_obj; 225 } else { 226 return NULL; 227 } 228 } 229 230 int HeapShared::append_root(oop obj) { 231 assert(DumpSharedSpaces, "dump-time only"); 232 233 // No GC should happen since we aren't scanning _pending_roots. 234 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); 235 236 if (_pending_roots == NULL) { 237 _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500); 238 } 239 240 return _pending_roots->append(obj); 241 } 242 243 objArrayOop HeapShared::roots() { 244 if (DumpSharedSpaces) { 245 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); 246 if (!HeapShared::can_write()) { 247 return NULL; 248 } 249 } else { 250 assert(UseSharedSpaces, "must be"); 251 } 252 253 objArrayOop roots = (objArrayOop)_roots.resolve(); 254 assert(roots != NULL, "should have been initialized"); 255 return roots; 256 } 257 258 // Returns an objArray that contains all the roots of the archived objects 259 oop HeapShared::get_root(int index, bool clear) { 260 assert(index >= 0, "sanity"); 261 if (DumpSharedSpaces) { 262 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); 263 assert(_pending_roots != NULL, "sanity"); 264 return _pending_roots->at(index); 265 } else { 266 assert(UseSharedSpaces, "must be"); 267 assert(!_roots.is_empty(), "must have loaded shared heap"); 268 oop result = roots()->obj_at(index); 269 if (clear) { 270 clear_root(index); 271 } 272 return result; 273 } 274 } 275 276 void HeapShared::clear_root(int index) { 277 assert(index >= 0, "sanity"); 278 assert(UseSharedSpaces, "must be"); 279 if (is_fully_available()) { 280 if (log_is_enabled(Debug, cds, heap)) { 281 oop old = roots()->obj_at(index); 282 log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old)); 283 } 284 roots()->obj_at_put(index, NULL); 285 } 286 } 287 288 oop HeapShared::archive_object(oop obj) { 289 assert(DumpSharedSpaces, "dump-time only"); 290 291 oop ao = find_archived_heap_object(obj); 292 if (ao != 
NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
    if (!(EnableValhalla && obj->mark().is_inline_type())) {
      int hash_original = obj->identity_hash();
      archived_oop->set_mark(archived_oop->klass()->prototype_header().copy_set_hash(hash_original));
      assert(archived_oop->mark().is_unlocked(), "sanity");

      DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
      assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);
    }

    ArchivedObjectCache* cache = archived_object_cache();
    CachedOopInfo info = make_cached_oop_info(archived_oop);
    cache->put(obj, info);
    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
    }
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_direct_exit(-1,
        err_msg("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
                SIZE_FORMAT "M", MaxHeapSize/M));
  }
  return archived_oop;
}

void HeapShared::archive_klass_objects() {
  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));

    // archive mirror object
    java_lang_Class::archive_mirror(k);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references();
    }
  }
}

// -- Handling of Enum objects
// Java Enum classes have synthetic <clinit> methods that look like this
//     enum MyEnum {FOO, BAR}
//     MyEnum::<clinit> {
//        /*static final MyEnum*/ MyEnum::FOO = new MyEnum("FOO");
//        /*static final MyEnum*/ MyEnum::BAR = new MyEnum("BAR");
//     }
//
// If MyEnum::FOO object is referenced by any of the archived subgraphs, we must
// ensure the archived value is equal (in object address) to the runtime value of
// MyEnum::FOO.
//
// However, since MyEnum::<clinit> is synthetically generated by javac, there's
// no way of programmatically handling this inside the Java code (as you would handle
// ModuleLayer::EMPTY_LAYER, for example).
//
// Instead, we archive all static fields of such Enum classes.
At runtime, 375 // HeapShared::initialize_enum_klass() will skip the <clinit> method and pull 376 // the static fields out of the archived heap. 377 void HeapShared::check_enum_obj(int level, 378 KlassSubGraphInfo* subgraph_info, 379 oop orig_obj, 380 bool is_closed_archive) { 381 Klass* k = orig_obj->klass(); 382 Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k); 383 if (!k->is_instance_klass()) { 384 return; 385 } 386 InstanceKlass* ik = InstanceKlass::cast(k); 387 if (ik->java_super() == vmClasses::Enum_klass() && !ik->has_archived_enum_objs()) { 388 ResourceMark rm; 389 ik->set_has_archived_enum_objs(); 390 relocated_k->set_has_archived_enum_objs(); 391 oop mirror = ik->java_mirror(); 392 393 for (JavaFieldStream fs(ik); !fs.done(); fs.next()) { 394 if (fs.access_flags().is_static()) { 395 fieldDescriptor& fd = fs.field_descriptor(); 396 if (fd.field_type() != T_OBJECT && fd.field_type() != T_ARRAY) { 397 guarantee(false, "static field %s::%s must be T_OBJECT or T_ARRAY", 398 ik->external_name(), fd.name()->as_C_string()); 399 } 400 oop oop_field = mirror->obj_field(fd.offset()); 401 if (oop_field == NULL) { 402 guarantee(false, "static field %s::%s must not be null", 403 ik->external_name(), fd.name()->as_C_string()); 404 } else if (oop_field->klass() != ik && oop_field->klass() != ik->array_klass_or_null()) { 405 guarantee(false, "static field %s::%s is of the wrong type", 406 ik->external_name(), fd.name()->as_C_string()); 407 } 408 oop archived_oop_field = archive_reachable_objects_from(level, subgraph_info, oop_field, is_closed_archive); 409 int root_index = append_root(archived_oop_field); 410 log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT " -> " INTPTR_FORMAT ")", 411 root_index, ik->external_name(), fd.name()->as_C_string(), 412 p2i((oopDesc*)oop_field), p2i((oopDesc*)archived_oop_field)); 413 SystemDictionaryShared::add_enum_klass_static_field(ik, root_index); 414 } 415 } 416 } 417 } 418 419 // See comments in HeapShared::check_enum_obj() 420 bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) { 421 if (!is_fully_available()) { 422 return false; 423 } 424 425 RunTimeClassInfo* info = RunTimeClassInfo::get_for(k); 426 assert(info != NULL, "sanity"); 427 428 if (log_is_enabled(Info, cds, heap)) { 429 ResourceMark rm; 430 log_info(cds, heap)("Initializing Enum class: %s", k->external_name()); 431 } 432 433 oop mirror = k->java_mirror(); 434 int i = 0; 435 for (JavaFieldStream fs(k); !fs.done(); fs.next()) { 436 if (fs.access_flags().is_static()) { 437 int root_index = info->enum_klass_static_field_root_index_at(i++); 438 fieldDescriptor& fd = fs.field_descriptor(); 439 assert(fd.field_type() == T_OBJECT || fd.field_type() == T_ARRAY, "must be"); 440 mirror->obj_field_put(fd.offset(), get_root(root_index, /*clear=*/true)); 441 } 442 } 443 return true; 444 } 445 446 void HeapShared::run_full_gc_in_vm_thread() { 447 if (HeapShared::can_write()) { 448 // Avoid fragmentation while archiving heap objects. 449 // We do this inside a safepoint, so that no further allocation can happen after GC 450 // has finished. 451 if (GCLocker::is_active()) { 452 // Just checking for safety ... 453 // This should not happen during -Xshare:dump. If you see this, probably the Java core lib 454 // has been modified such that JNI code is executed in some clean up threads after 455 // we have finished class loading. 456 log_warning(cds)("GC locker is held, unable to start extra compacting GC. 
This may produce suboptimal results."); 457 } else { 458 log_info(cds)("Run GC ..."); 459 Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc); 460 log_info(cds)("Run GC done"); 461 } 462 } 463 } 464 465 void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions, 466 GrowableArray<MemRegion>* open_regions) { 467 468 G1HeapVerifier::verify_ready_for_archiving(); 469 470 { 471 NoSafepointVerifier nsv; 472 473 // Cache for recording where the archived objects are copied to 474 create_archived_object_cache(); 475 476 log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]", 477 UseCompressedOops ? p2i(CompressedOops::begin()) : 478 p2i((address)G1CollectedHeap::heap()->reserved().start()), 479 UseCompressedOops ? p2i(CompressedOops::end()) : 480 p2i((address)G1CollectedHeap::heap()->reserved().end())); 481 log_info(cds)("Dumping objects to closed archive heap region ..."); 482 copy_closed_objects(closed_regions); 483 484 log_info(cds)("Dumping objects to open archive heap region ..."); 485 copy_open_objects(open_regions); 486 487 CDSHeapVerifier::verify(); 488 destroy_archived_object_cache(); 489 } 490 491 G1HeapVerifier::verify_archive_regions(); 492 } 493 494 void HeapShared::copy_closed_objects(GrowableArray<MemRegion>* closed_regions) { 495 assert(HeapShared::can_write(), "must be"); 496 497 G1CollectedHeap::heap()->begin_archive_alloc_range(); 498 499 // Archive interned string objects 500 StringTable::write_to_archive(_dumped_interned_strings); 501 502 archive_object_subgraphs(closed_archive_subgraph_entry_fields, 503 num_closed_archive_subgraph_entry_fields, 504 true /* is_closed_archive */, 505 false /* is_full_module_graph */); 506 507 G1CollectedHeap::heap()->end_archive_alloc_range(closed_regions, 508 os::vm_allocation_granularity()); 509 } 510 511 void HeapShared::copy_open_objects(GrowableArray<MemRegion>* open_regions) { 512 assert(HeapShared::can_write(), "must be"); 513 514 G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */); 515 516 java_lang_Class::archive_basic_type_mirrors(); 517 518 archive_klass_objects(); 519 520 archive_object_subgraphs(open_archive_subgraph_entry_fields, 521 num_open_archive_subgraph_entry_fields, 522 false /* is_closed_archive */, 523 false /* is_full_module_graph */); 524 if (MetaspaceShared::use_full_module_graph()) { 525 archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields, 526 num_fmg_open_archive_subgraph_entry_fields, 527 false /* is_closed_archive */, 528 true /* is_full_module_graph */); 529 ClassLoaderDataShared::init_archived_oops(); 530 } 531 532 copy_roots(); 533 534 G1CollectedHeap::heap()->end_archive_alloc_range(open_regions, 535 os::vm_allocation_granularity()); 536 } 537 538 // Copy _pending_archive_roots into an objArray 539 void HeapShared::copy_roots() { 540 int length = _pending_roots != NULL ? 
_pending_roots->length() : 0; 541 size_t size = objArrayOopDesc::object_size(length); 542 Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass 543 HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size); 544 545 memset(mem, 0, size * BytesPerWord); 546 { 547 // This is copied from MemAllocator::finish 548 oopDesc::set_mark(mem, k->prototype_header()); 549 oopDesc::release_set_klass(mem, k); 550 } 551 { 552 // This is copied from ObjArrayAllocator::initialize 553 arrayOopDesc::set_length(mem, length); 554 } 555 556 _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem)); 557 for (int i = 0; i < length; i++) { 558 roots()->obj_at_put(i, _pending_roots->at(i)); 559 } 560 log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " words, klass = %p, obj = %p", length, size, k, mem); 561 } 562 563 void HeapShared::init_narrow_oop_decoding(address base, int shift) { 564 _narrow_oop_base = base; 565 _narrow_oop_shift = shift; 566 } 567 568 // 569 // Subgraph archiving support 570 // 571 HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL; 572 HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table; 573 574 // Get the subgraph_info for Klass k. A new subgraph_info is created if 575 // there is no existing one for k. The subgraph_info records the relocated 576 // Klass* of the original k. 577 KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) { 578 assert(DumpSharedSpaces, "dump time only"); 579 bool created; 580 Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k); 581 KlassSubGraphInfo* info = 582 _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(relocated_k, is_full_module_graph), 583 &created); 584 assert(created, "must not initialize twice"); 585 return info; 586 } 587 588 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) { 589 assert(DumpSharedSpaces, "dump time only"); 590 KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k); 591 assert(info != NULL, "must have been initialized"); 592 return info; 593 } 594 595 // Add an entry field to the current KlassSubGraphInfo. 596 void KlassSubGraphInfo::add_subgraph_entry_field( 597 int static_field_offset, oop v, bool is_closed_archive) { 598 assert(DumpSharedSpaces, "dump time only"); 599 if (_subgraph_entry_fields == NULL) { 600 _subgraph_entry_fields = 601 new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass); 602 } 603 _subgraph_entry_fields->append(static_field_offset); 604 _subgraph_entry_fields->append(HeapShared::append_root(v)); 605 } 606 607 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs. 608 // Only objects of boot classes can be included in sub-graph. 609 void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) { 610 assert(DumpSharedSpaces, "dump time only"); 611 Klass* relocated_k = ArchiveBuilder::get_relocated_klass(orig_k); 612 613 if (_subgraph_object_klasses == NULL) { 614 _subgraph_object_klasses = 615 new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass); 616 } 617 618 assert(ArchiveBuilder::current()->is_in_buffer_space(relocated_k), "must be a shared class"); 619 620 if (_k == relocated_k) { 621 // Don't add the Klass containing the sub-graph to it's own klass 622 // initialization list. 
623 return; 624 } 625 626 if (relocated_k->is_instance_klass()) { 627 assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(), 628 "must be boot class"); 629 // vmClasses::xxx_klass() are not updated, need to check 630 // the original Klass* 631 if (orig_k == vmClasses::String_klass() || 632 orig_k == vmClasses::Object_klass()) { 633 // Initialized early during VM initialization. No need to be added 634 // to the sub-graph object class list. 635 return; 636 } 637 } else if (relocated_k->is_objArray_klass()) { 638 Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass(); 639 if (abk->is_instance_klass()) { 640 assert(InstanceKlass::cast(abk)->is_shared_boot_class(), 641 "must be boot class"); 642 } 643 if (relocated_k == Universe::objectArrayKlassObj()) { 644 // Initialized early during Universe::genesis. No need to be added 645 // to the list. 646 return; 647 } 648 } else { 649 assert(relocated_k->is_typeArray_klass(), "must be"); 650 // Primitive type arrays are created early during Universe::genesis. 651 return; 652 } 653 654 if (log_is_enabled(Debug, cds, heap)) { 655 if (!_subgraph_object_klasses->contains(relocated_k)) { 656 ResourceMark rm; 657 log_debug(cds, heap)("Adding klass %s", orig_k->external_name()); 658 } 659 } 660 661 _subgraph_object_klasses->append_if_missing(relocated_k); 662 _has_non_early_klasses |= is_non_early_klass(orig_k); 663 } 664 665 bool KlassSubGraphInfo::is_non_early_klass(Klass* k) { 666 if (k->is_objArray_klass()) { 667 k = ObjArrayKlass::cast(k)->bottom_klass(); 668 } 669 if (k->is_instance_klass()) { 670 if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) { 671 ResourceMark rm; 672 log_info(cds, heap)("non-early: %s", k->external_name()); 673 return true; 674 } else { 675 return false; 676 } 677 } else { 678 return false; 679 } 680 } 681 682 // Initialize an archived subgraph_info_record from the given KlassSubGraphInfo. 
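// The record stores the (relocated) Klass* of the entry class, the entry field records
// (pairs of <static field offset, root index>), and the klasses of all objects in the
// archived sub-graph; these are used at runtime to re-install the entry fields and to
// resolve/initialize the required classes.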
683 void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) { 684 _k = info->klass(); 685 _entry_field_records = NULL; 686 _subgraph_object_klasses = NULL; 687 _is_full_module_graph = info->is_full_module_graph(); 688 689 if (_is_full_module_graph) { 690 // Consider all classes referenced by the full module graph as early -- we will be 691 // allocating objects of these classes during JVMTI early phase, so they cannot 692 // be processed by (non-early) JVMTI ClassFileLoadHook 693 _has_non_early_klasses = false; 694 } else { 695 _has_non_early_klasses = info->has_non_early_klasses(); 696 } 697 698 if (_has_non_early_klasses) { 699 ResourceMark rm; 700 log_info(cds, heap)( 701 "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled", 702 _k->external_name()); 703 } 704 705 // populate the entry fields 706 GrowableArray<int>* entry_fields = info->subgraph_entry_fields(); 707 if (entry_fields != NULL) { 708 int num_entry_fields = entry_fields->length(); 709 assert(num_entry_fields % 2 == 0, "sanity"); 710 _entry_field_records = 711 ArchiveBuilder::new_ro_array<int>(num_entry_fields); 712 for (int i = 0 ; i < num_entry_fields; i++) { 713 _entry_field_records->at_put(i, entry_fields->at(i)); 714 } 715 } 716 717 // the Klasses of the objects in the sub-graphs 718 GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses(); 719 if (subgraph_object_klasses != NULL) { 720 int num_subgraphs_klasses = subgraph_object_klasses->length(); 721 _subgraph_object_klasses = 722 ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses); 723 for (int i = 0; i < num_subgraphs_klasses; i++) { 724 Klass* subgraph_k = subgraph_object_klasses->at(i); 725 if (log_is_enabled(Info, cds, heap)) { 726 ResourceMark rm; 727 log_info(cds, heap)( 728 "Archived object klass %s (%2d) => %s", 729 _k->external_name(), i, subgraph_k->external_name()); 730 } 731 _subgraph_object_klasses->at_put(i, subgraph_k); 732 ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i)); 733 } 734 } 735 736 ArchivePtrMarker::mark_pointer(&_k); 737 ArchivePtrMarker::mark_pointer(&_entry_field_records); 738 ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses); 739 } 740 741 struct CopyKlassSubGraphInfoToArchive : StackObj { 742 CompactHashtableWriter* _writer; 743 CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {} 744 745 bool do_entry(Klass* klass, KlassSubGraphInfo& info) { 746 if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) { 747 ArchivedKlassSubGraphInfoRecord* record = 748 (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord)); 749 record->init(&info); 750 751 Klass* relocated_k = ArchiveBuilder::get_relocated_klass(klass); 752 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)relocated_k); 753 u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record); 754 _writer->add(hash, delta); 755 } 756 return true; // keep on iterating 757 } 758 }; 759 760 // Build the records of archived subgraph infos, which include: 761 // - Entry points to all subgraphs from the containing class mirror. The entry 762 // points are static fields in the mirror. For each entry point, the field 763 // offset, value and is_closed_archive flag are recorded in the sub-graph 764 // info. The value is stored back to the corresponding field at runtime. 
// - A list of klasses that need to be loaded/initialized before archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);
  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

void HeapShared::serialize(SerializeClosure* soc) {
  oop roots_oop = NULL;

  if (soc->reading()) {
    soc->do_oop(&roots_oop); // read from archive
    assert(oopDesc::is_oop_or_null(roots_oop), "is oop");
    // Create an OopHandle only if we have actually mapped or loaded the roots
    if (roots_oop != NULL) {
      assert(HeapShared::is_fully_available(), "must be");
      _roots = OopHandle(Universe::vm_global(), roots_oop);
    }
  } else {
    // writing
    roots_oop = roots();
    soc->do_oop(&roots_oop); // write to archive
  }

  _run_time_subgraph_info_table.serialize_header(soc);
}

static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 forces a GC to happen in such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC, true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
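// resolve_classes() resolves (but does not initialize) the classes needed by each archived
// subgraph, so that the oops in HeapShared::roots() have valid klasses before any GC can
// run; the classes are initialized later via initialize_from_archived_subgraph().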
833 void HeapShared::resolve_classes(JavaThread* THREAD) { 834 if (!is_fully_available()) { 835 return; // nothing to do 836 } 837 resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields, 838 num_closed_archive_subgraph_entry_fields, 839 THREAD); 840 resolve_classes_for_subgraphs(open_archive_subgraph_entry_fields, 841 num_open_archive_subgraph_entry_fields, 842 THREAD); 843 resolve_classes_for_subgraphs(fmg_open_archive_subgraph_entry_fields, 844 num_fmg_open_archive_subgraph_entry_fields, 845 THREAD); 846 } 847 848 void HeapShared::resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[], 849 int num, JavaThread* THREAD) { 850 for (int i = 0; i < num; i++) { 851 ArchivableStaticFieldInfo* info = &fields[i]; 852 TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name); 853 InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name); 854 assert(k != NULL && k->is_shared_boot_class(), "sanity"); 855 resolve_classes_for_subgraph_of(k, THREAD); 856 } 857 } 858 859 void HeapShared::resolve_classes_for_subgraph_of(Klass* k, JavaThread* THREAD) { 860 ExceptionMark em(THREAD); 861 const ArchivedKlassSubGraphInfoRecord* record = 862 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD); 863 if (HAS_PENDING_EXCEPTION) { 864 CLEAR_PENDING_EXCEPTION; 865 } 866 if (record == NULL) { 867 clear_archived_roots_of(k); 868 } 869 } 870 871 void HeapShared::initialize_from_archived_subgraph(Klass* k, JavaThread* THREAD) { 872 if (!is_fully_available()) { 873 return; // nothing to do 874 } 875 876 ExceptionMark em(THREAD); 877 const ArchivedKlassSubGraphInfoRecord* record = 878 resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD); 879 880 if (HAS_PENDING_EXCEPTION) { 881 CLEAR_PENDING_EXCEPTION; 882 // None of the field value will be set if there was an exception when initializing the classes. 883 // The java code will not see any of the archived objects in the 884 // subgraphs referenced from k in this case. 885 return; 886 } 887 888 if (record != NULL) { 889 init_archived_fields_for(k, record); 890 } 891 } 892 893 const ArchivedKlassSubGraphInfoRecord* 894 HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) { 895 assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces"); 896 897 if (!k->is_shared()) { 898 return NULL; 899 } 900 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k); 901 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0); 902 903 // Initialize from archived data. Currently this is done only 904 // during VM initialization time. No lock is needed. 905 if (record != NULL) { 906 if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) { 907 if (log_is_enabled(Info, cds, heap)) { 908 ResourceMark rm(THREAD); 909 log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled", 910 k->external_name()); 911 } 912 return NULL; 913 } 914 915 if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) { 916 if (log_is_enabled(Info, cds, heap)) { 917 ResourceMark rm(THREAD); 918 log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled", 919 k->external_name()); 920 } 921 return NULL; 922 } 923 924 resolve_or_init(k, do_init, CHECK_NULL); 925 926 // Load/link/initialize the klasses of the objects in the subgraph. 927 // NULL class loader is used. 
928 Array<Klass*>* klasses = record->subgraph_object_klasses(); 929 if (klasses != NULL) { 930 for (int i = 0; i < klasses->length(); i++) { 931 Klass* klass = klasses->at(i); 932 if (!klass->is_shared()) { 933 return NULL; 934 } 935 resolve_or_init(klass, do_init, CHECK_NULL); 936 } 937 } 938 } 939 940 return record; 941 } 942 943 void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) { 944 if (!do_init) { 945 if (k->class_loader_data() == NULL) { 946 Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK); 947 assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook"); 948 } 949 } else { 950 assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes"); 951 if (k->is_instance_klass()) { 952 InstanceKlass* ik = InstanceKlass::cast(k); 953 ik->initialize(CHECK); 954 } else if (k->is_objArray_klass()) { 955 ObjArrayKlass* oak = ObjArrayKlass::cast(k); 956 oak->initialize(CHECK); 957 } 958 } 959 } 960 961 void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) { 962 verify_the_heap(k, "before"); 963 964 // Load the subgraph entry fields from the record and store them back to 965 // the corresponding fields within the mirror. 966 oop m = k->java_mirror(); 967 Array<int>* entry_field_records = record->entry_field_records(); 968 if (entry_field_records != NULL) { 969 int efr_len = entry_field_records->length(); 970 assert(efr_len % 2 == 0, "sanity"); 971 for (int i = 0; i < efr_len; i += 2) { 972 int field_offset = entry_field_records->at(i); 973 int root_index = entry_field_records->at(i+1); 974 oop v = get_root(root_index, /*clear=*/true); 975 m->obj_field_put(field_offset, v); 976 log_debug(cds, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v)); 977 } 978 979 // Done. Java code can see the archived sub-graphs referenced from k's 980 // mirror after this point. 981 if (log_is_enabled(Info, cds, heap)) { 982 ResourceMark rm; 983 log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s", 984 k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? 
" (early)" : ""); 985 } 986 } 987 988 verify_the_heap(k, "after "); 989 } 990 991 void HeapShared::clear_archived_roots_of(Klass* k) { 992 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k); 993 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0); 994 if (record != NULL) { 995 Array<int>* entry_field_records = record->entry_field_records(); 996 if (entry_field_records != NULL) { 997 int efr_len = entry_field_records->length(); 998 assert(efr_len % 2 == 0, "sanity"); 999 for (int i = 0; i < efr_len; i += 2) { 1000 int root_index = entry_field_records->at(i+1); 1001 clear_root(root_index); 1002 } 1003 } 1004 } 1005 } 1006 1007 class WalkOopAndArchiveClosure: public BasicOopIterateClosure { 1008 int _level; 1009 bool _is_closed_archive; 1010 bool _record_klasses_only; 1011 KlassSubGraphInfo* _subgraph_info; 1012 oop _orig_referencing_obj; 1013 oop _archived_referencing_obj; 1014 1015 // The following are for maintaining a stack for determining 1016 // CachedOopInfo::_referrer 1017 static WalkOopAndArchiveClosure* _current; 1018 WalkOopAndArchiveClosure* _last; 1019 public: 1020 WalkOopAndArchiveClosure(int level, 1021 bool is_closed_archive, 1022 bool record_klasses_only, 1023 KlassSubGraphInfo* subgraph_info, 1024 oop orig, oop archived) : 1025 _level(level), _is_closed_archive(is_closed_archive), 1026 _record_klasses_only(record_klasses_only), 1027 _subgraph_info(subgraph_info), 1028 _orig_referencing_obj(orig), _archived_referencing_obj(archived) { 1029 _last = _current; 1030 _current = this; 1031 } 1032 ~WalkOopAndArchiveClosure() { 1033 _current = _last; 1034 } 1035 void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 1036 void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 1037 1038 protected: 1039 template <class T> void do_oop_work(T *p) { 1040 oop obj = RawAccess<>::oop_load(p); 1041 if (!CompressedOops::is_null(obj)) { 1042 assert(!HeapShared::is_archived_object_during_dumptime(obj), 1043 "original objects must not point to archived objects"); 1044 1045 size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char)); 1046 T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta); 1047 1048 if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) { 1049 ResourceMark rm; 1050 log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size " SIZE_FORMAT " %s", _level, 1051 _orig_referencing_obj->klass()->external_name(), field_delta, 1052 p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name()); 1053 LogTarget(Trace, cds, heap) log; 1054 LogStream out(log); 1055 obj->print_on(&out); 1056 } 1057 1058 oop archived = HeapShared::archive_reachable_objects_from( 1059 _level + 1, _subgraph_info, obj, _is_closed_archive); 1060 assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1"); 1061 assert(HeapShared::is_archived_object_during_dumptime(archived), "must be"); 1062 1063 if (!_record_klasses_only) { 1064 // Update the reference in the archived copy of the referencing object. 
1065 log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT, 1066 _level, p2i(new_p), p2i(obj), p2i(archived)); 1067 RawAccess<IS_NOT_NULL>::oop_store(new_p, archived); 1068 } 1069 } 1070 } 1071 1072 public: 1073 static WalkOopAndArchiveClosure* current() { return _current; } 1074 oop orig_referencing_obj() { return _orig_referencing_obj; } 1075 KlassSubGraphInfo* subgraph_info() { return _subgraph_info; } 1076 }; 1077 1078 WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = NULL; 1079 1080 HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop orig_obj) { 1081 CachedOopInfo info; 1082 WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current(); 1083 1084 info._subgraph_info = (walker == NULL) ? NULL : walker->subgraph_info(); 1085 info._referrer = (walker == NULL) ? NULL : walker->orig_referencing_obj(); 1086 info._obj = orig_obj; 1087 1088 return info; 1089 } 1090 1091 void HeapShared::check_closed_region_object(InstanceKlass* k) { 1092 // Check fields in the object 1093 for (JavaFieldStream fs(k); !fs.done(); fs.next()) { 1094 if (!fs.access_flags().is_static()) { 1095 BasicType ft = fs.field_descriptor().field_type(); 1096 if (!fs.access_flags().is_final() && is_reference_type(ft)) { 1097 ResourceMark rm; 1098 log_warning(cds, heap)( 1099 "Please check reference field in %s instance in closed archive heap region: %s %s", 1100 k->external_name(), (fs.name())->as_C_string(), 1101 (fs.signature())->as_C_string()); 1102 } 1103 } 1104 } 1105 } 1106 1107 void HeapShared::check_module_oop(oop orig_module_obj) { 1108 assert(DumpSharedSpaces, "must be"); 1109 assert(java_lang_Module::is_instance(orig_module_obj), "must be"); 1110 ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj); 1111 if (orig_module_ent == NULL) { 1112 // These special Module objects are created in Java code. They are not 1113 // defined via Modules::define_module(), so they don't have a ModuleEntry: 1114 // java.lang.Module::ALL_UNNAMED_MODULE 1115 // java.lang.Module::EVERYONE_MODULE 1116 // jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule 1117 assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed"); 1118 log_info(cds, heap)("Module oop with No ModuleEntry* @[" PTR_FORMAT "]", p2i(orig_module_obj)); 1119 } else { 1120 ClassLoaderData* loader_data = orig_module_ent->loader_data(); 1121 assert(loader_data->is_builtin_class_loader_data(), "must be"); 1122 } 1123 } 1124 1125 1126 // (1) If orig_obj has not been archived yet, archive it. 1127 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called), 1128 // trace all objects that are reachable from it, and make sure these objects are archived. 1129 // (3) Record the klasses of all orig_obj and all reachable objects. 1130 oop HeapShared::archive_reachable_objects_from(int level, 1131 KlassSubGraphInfo* subgraph_info, 1132 oop orig_obj, 1133 bool is_closed_archive) { 1134 assert(orig_obj != NULL, "must be"); 1135 assert(!is_archived_object_during_dumptime(orig_obj), "sanity"); 1136 1137 if (!JavaClasses::is_supported_for_archiving(orig_obj)) { 1138 // This object has injected fields that cannot be supported easily, so we disallow them for now. 1139 // If you get an error here, you probably made a change in the JDK library that has added 1140 // these objects that are referenced (directly or indirectly) by static fields. 
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    vm_direct_exit(1);
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_direct_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_object(orig_obj);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size " SIZE_FORMAT ", skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_direct_exit(1);
      }
    }

    if (java_lang_Module::is_instance(orig_obj)) {
      check_module_oop(orig_obj);
      java_lang_Module::set_module_entry(archived_obj, NULL);
      java_lang_Module::set_loader(archived_obj, NULL);
    } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
      // class_data will be restored explicitly at run time.
      guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
                orig_obj == SystemDictionary::java_system_loader() ||
                java_lang_ClassLoader::loader_data(orig_obj) == NULL, "must be");
      java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_region_object(InstanceKlass::cast(orig_k));
  }

  check_enum_obj(level + 1, subgraph_info, orig_obj, is_closed_archive);
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. Mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
//    within a Class instance (java mirror). If the static field is a
//    reference field and points to a non-null java object, proceed to
//    the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
//    current object already exists, updates the pointer in the archived
//    copy of the referencing object to point to the current archived object.
//    Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
//    archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
//    point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
//    for loading and initializing before any object in the archived graph can
//    be accessed at runtime.
1254 // 1255 void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k, 1256 const char* klass_name, 1257 int field_offset, 1258 const char* field_name, 1259 bool is_closed_archive) { 1260 assert(DumpSharedSpaces, "dump time only"); 1261 assert(k->is_shared_boot_class(), "must be boot class"); 1262 1263 oop m = k->java_mirror(); 1264 1265 KlassSubGraphInfo* subgraph_info = get_subgraph_info(k); 1266 oop f = m->obj_field(field_offset); 1267 1268 log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f)); 1269 1270 if (!CompressedOops::is_null(f)) { 1271 if (log_is_enabled(Trace, cds, heap)) { 1272 LogTarget(Trace, cds, heap) log; 1273 LogStream out(log); 1274 f->print_on(&out); 1275 } 1276 1277 oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive); 1278 1279 if (af == NULL) { 1280 log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)", 1281 klass_name, field_name); 1282 } else { 1283 // Note: the field value is not preserved in the archived mirror. 1284 // Record the field as a new subGraph entry point. The recorded 1285 // information is restored from the archive at runtime. 1286 subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive); 1287 log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af)); 1288 } 1289 } else { 1290 // The field contains null, we still need to record the entry point, 1291 // so it can be restored at runtime. 1292 subgraph_info->add_subgraph_entry_field(field_offset, NULL, false); 1293 } 1294 } 1295 1296 #ifndef PRODUCT 1297 class VerifySharedOopClosure: public BasicOopIterateClosure { 1298 private: 1299 bool _is_archived; 1300 1301 public: 1302 VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {} 1303 1304 void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); } 1305 void do_oop( oop *p) { VerifySharedOopClosure::do_oop_work(p); } 1306 1307 protected: 1308 template <class T> void do_oop_work(T *p) { 1309 oop obj = RawAccess<>::oop_load(p); 1310 if (!CompressedOops::is_null(obj)) { 1311 HeapShared::verify_reachable_objects_from(obj, _is_archived); 1312 } 1313 } 1314 }; 1315 1316 void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) { 1317 assert(DumpSharedSpaces, "dump time only"); 1318 assert(k->is_shared_boot_class(), "must be boot class"); 1319 1320 oop m = k->java_mirror(); 1321 oop f = m->obj_field(field_offset); 1322 if (!CompressedOops::is_null(f)) { 1323 verify_subgraph_from(f); 1324 } 1325 } 1326 1327 void HeapShared::verify_subgraph_from(oop orig_obj) { 1328 oop archived_obj = find_archived_heap_object(orig_obj); 1329 if (archived_obj == NULL) { 1330 // It's OK for the root of a subgraph to be not archived. See comments in 1331 // archive_reachable_objects_from(). 1332 return; 1333 } 1334 1335 // Verify that all objects reachable from orig_obj are archived. 1336 init_seen_objects_table(); 1337 verify_reachable_objects_from(orig_obj, false); 1338 delete_seen_objects_table(); 1339 1340 // Note: we could also verify that all objects reachable from the archived 1341 // copy of orig_obj can only point to archived objects, with: 1342 // init_seen_objects_table(); 1343 // verify_reachable_objects_from(archived_obj, true); 1344 // init_seen_objects_table(); 1345 // but that's already done in G1HeapVerifier::verify_archive_regions so we 1346 // won't do it here. 
1347 } 1348 1349 void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) { 1350 _num_total_verifications ++; 1351 if (!has_been_seen_during_subgraph_recording(obj)) { 1352 set_has_been_seen_during_subgraph_recording(obj); 1353 1354 if (is_archived) { 1355 assert(is_archived_object_during_dumptime(obj), "must be"); 1356 assert(find_archived_heap_object(obj) == NULL, "must be"); 1357 } else { 1358 assert(!is_archived_object_during_dumptime(obj), "must be"); 1359 assert(find_archived_heap_object(obj) != NULL, "must be"); 1360 } 1361 1362 VerifySharedOopClosure walker(is_archived); 1363 obj->oop_iterate(&walker); 1364 } 1365 } 1366 #endif 1367 1368 HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL; 1369 int HeapShared::_num_new_walked_objs; 1370 int HeapShared::_num_new_archived_objs; 1371 int HeapShared::_num_old_recorded_klasses; 1372 1373 int HeapShared::_num_total_subgraph_recordings = 0; 1374 int HeapShared::_num_total_walked_objs = 0; 1375 int HeapShared::_num_total_archived_objs = 0; 1376 int HeapShared::_num_total_recorded_klasses = 0; 1377 int HeapShared::_num_total_verifications = 0; 1378 1379 bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) { 1380 return _seen_objects_table->get(obj) != NULL; 1381 } 1382 1383 void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) { 1384 assert(!has_been_seen_during_subgraph_recording(obj), "sanity"); 1385 _seen_objects_table->put(obj, true); 1386 ++ _num_new_walked_objs; 1387 } 1388 1389 void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) { 1390 log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name); 1391 init_subgraph_info(k, is_full_module_graph); 1392 init_seen_objects_table(); 1393 _num_new_walked_objs = 0; 1394 _num_new_archived_objs = 0; 1395 _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses(); 1396 } 1397 1398 void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) { 1399 int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() - 1400 _num_old_recorded_klasses; 1401 log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: " 1402 "walked %d objs, archived %d new objs, recorded %d classes", 1403 class_name, _num_new_walked_objs, _num_new_archived_objs, 1404 num_new_recorded_klasses); 1405 1406 delete_seen_objects_table(); 1407 1408 _num_total_subgraph_recordings ++; 1409 _num_total_walked_objs += _num_new_walked_objs; 1410 _num_total_archived_objs += _num_new_archived_objs; 1411 _num_total_recorded_klasses += num_new_recorded_klasses; 1412 } 1413 1414 class ArchivableStaticFieldFinder: public FieldClosure { 1415 InstanceKlass* _ik; 1416 Symbol* _field_name; 1417 bool _found; 1418 int _offset; 1419 public: 1420 ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) : 1421 _ik(ik), _field_name(field_name), _found(false), _offset(-1) {} 1422 1423 virtual void do_field(fieldDescriptor* fd) { 1424 if (fd->name() == _field_name) { 1425 assert(!_found, "fields cannot be overloaded"); 1426 assert(is_reference_type(fd->field_type()), "can archive only fields that are references"); 1427 _found = true; 1428 _offset = fd->offset(); 1429 } 1430 } 1431 bool found() { return _found; } 1432 int offset() { return _offset; } 1433 }; 1434 1435 void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[], 1436 int num, TRAPS) { 1437 for (int i = 0; i < num; i++) { 1438 
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);

    Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, CHECK);
    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(),
           "Only support boot classes");
    ik->initialize(CHECK);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(TRAPS) {
  assert(HeapShared::can_write(), "must be");
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();
  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
                             num_closed_archive_subgraph_entry_fields,
                             CHECK);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
                             num_open_archive_subgraph_entry_fields,
                             CHECK);
  if (MetaspaceShared::use_full_module_graph()) {
    init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields,
                               num_fmg_open_archive_subgraph_entry_fields,
                               CHECK);
  }
}

void HeapShared::init_for_dumping(TRAPS) {
  if (HeapShared::can_write()) {
    _dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
    init_subgraph_entry_fields(CHECK);
  }
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          int num, bool is_closed_archive,
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num; ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num; i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name,
                                                  is_closed_archive);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
                      is_closed_archive ? "closed" : "open",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)("  Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)("  Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)("  Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)("  Verified %d references", _num_total_verifications);
#endif
}

// Not all the strings in the global StringTable are dumped into the archive, because
// some of those strings may be referenced only by classes that are excluded from
// the archive. We need to explicitly mark the strings that are:
//   [1] used by classes that WILL be archived;
//   [2] included in the SharedArchiveConfigFile.
void HeapShared::add_to_dumped_interned_strings(oop string) {
  assert_at_safepoint(); // DumpedInternedStrings uses raw oops
  bool created;
  _dumped_interned_strings->put_if_absent(string, true, &created);
}

// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  void* _start;
  BitMap *_oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(void* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual void do_oop(narrowOop* p) {
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - (narrowOop*)_start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop* p) {
    _num_total_oops ++;
    if ((*p) != NULL) {
      size_t idx = p - (oop*)_start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};

ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  size_t num_bits = region.byte_size() / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  ResourceBitMap oopmap(num_bits);

  HeapWord* p   = region.start();
  HeapWord* end = region.end();
  FindEmbeddedNonNullPointers finder((void*)p, &oopmap);
  ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL;

  int num_objs = 0;
  while (p < end) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&finder);
    p += o->size();
    if (DumpSharedSpaces) {
      builder->relocate_klass_ptr(o);
    }
    ++ num_objs;
  }

  log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d",
                      num_objs, finder.num_total_oops(), finder.num_null_oops());
  return oopmap;
}

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchCompressedEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

class PatchUncompressedEmbeddedPointers: public BitMapClosure {
  oop* _start;

 public:
  PatchUncompressedEmbeddedPointers(oop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    oop* p = _start + offset;
    intptr_t dumptime_oop = (intptr_t)((void*)*p);
    assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
    intptr_t runtime_oop = dumptime_oop + HeapShared::runtime_delta();
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
    return true;
  }
};

// Patch all the non-null pointers that are embedded in the archived heap objects
// in this region
void HeapShared::patch_embedded_pointers(MemRegion region, address oopmap,
                                         size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  if (UseCompressedOops) {
    PatchCompressedEmbeddedPointers patcher((narrowOop*)region.start());
    bm.iterate(&patcher);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)region.start());
    bm.iterate(&patcher);
  }
}

// The CDS archive remembers each heap object by its address at dump time, but
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses.
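// Worked example with made-up addresses: suppose a region's first object had the
// dump-time (decoded) address 0x0000000712000000 and the region is loaded at
// 0x0000000717000000. Then
//
//   _dumptime_base  = 0x0000000712000000
//   _runtime_offset = 0x0000000717000000 - 0x0000000712000000 = 0x5000000
//
// and an object dumped at 0x0000000712000100 is found at runtime at
// 0x0000000712000100 + 0x5000000 = 0x0000000717000100.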
struct LoadedArchiveHeapRegion {
  int       _region_index;   // index for FileMapInfo::space_at(index)
  size_t    _region_size;    // number of bytes in this region
  uintptr_t _dumptime_base;  // The dump-time (decoded) address of the first object in this region
  intx      _runtime_offset; // If an object's dump time address P is within this region, its
                             // runtime address is P + _runtime_offset

  static int comparator(const void* a, const void* b) {
    LoadedArchiveHeapRegion* reg_a = (LoadedArchiveHeapRegion*)a;
    LoadedArchiveHeapRegion* reg_b = (LoadedArchiveHeapRegion*)b;
    if (reg_a->_dumptime_base < reg_b->_dumptime_base) {
      return -1;
    } else if (reg_a->_dumptime_base == reg_b->_dumptime_base) {
      return 0;
    } else {
      return 1;
    }
  }

  uintptr_t top() {
    return _dumptime_base + _region_size;
  }
};

void HeapShared::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_regions,
                                             int num_loaded_regions) {
  _dumptime_base_0 = loaded_regions[0]._dumptime_base;
  _dumptime_base_1 = loaded_regions[1]._dumptime_base;
  _dumptime_base_2 = loaded_regions[2]._dumptime_base;
  _dumptime_base_3 = loaded_regions[3]._dumptime_base;
  _dumptime_top = loaded_regions[num_loaded_regions-1].top();

  _runtime_offset_0 = loaded_regions[0]._runtime_offset;
  _runtime_offset_1 = loaded_regions[1]._runtime_offset;
  _runtime_offset_2 = loaded_regions[2]._runtime_offset;
  _runtime_offset_3 = loaded_regions[3]._runtime_offset;

  assert(2 <= num_loaded_regions && num_loaded_regions <= 4, "must be");
  if (num_loaded_regions < 4) {
    _dumptime_base_3 = UINTPTR_MAX;
  }
  if (num_loaded_regions < 3) {
    _dumptime_base_2 = UINTPTR_MAX;
  }
}

bool HeapShared::can_load() {
  return Universe::heap()->can_load_archived_objects();
}

template <int NUM_LOADED_REGIONS>
class PatchLoadedRegionPointers: public BitMapClosure {
  narrowOop* _start;
  intx _offset_0;
  intx _offset_1;
  intx _offset_2;
  intx _offset_3;
  uintptr_t _base_0;
  uintptr_t _base_1;
  uintptr_t _base_2;
  uintptr_t _base_3;
  uintptr_t _top;

  static_assert(MetaspaceShared::max_num_heap_regions == 4, "can't handle more than 4 regions");
  static_assert(NUM_LOADED_REGIONS >= 2, "we have at least 2 loaded regions");
  static_assert(NUM_LOADED_REGIONS <= 4, "we have at most 4 loaded regions");

 public:
  PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_regions)
    : _start(start),
      _offset_0(loaded_regions[0]._runtime_offset),
      _offset_1(loaded_regions[1]._runtime_offset),
      _offset_2(loaded_regions[2]._runtime_offset),
      _offset_3(loaded_regions[3]._runtime_offset),
      _base_0(loaded_regions[0]._dumptime_base),
      _base_1(loaded_regions[1]._dumptime_base),
      _base_2(loaded_regions[2]._dumptime_base),
      _base_3(loaded_regions[3]._dumptime_base) {
    _top = loaded_regions[NUM_LOADED_REGIONS-1].top();
  }

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    uintptr_t o = cast_from_oop<uintptr_t>(HeapShared::decode_from_archive(v));
    assert(_base_0 <= o && o < _top, "must be");

    // We usually have only 2 regions for the default archive. Use template to avoid unnecessary comparisons.
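    // Since NUM_LOADED_REGIONS is a compile-time constant, the compiler can fold away
    // the unreachable comparisons below; e.g. with the common 2-region case, only the
    // single "o >= _base_1" check remains.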
    if (NUM_LOADED_REGIONS > 3 && o >= _base_3) {
      o += _offset_3;
    } else if (NUM_LOADED_REGIONS > 2 && o >= _base_2) {
      o += _offset_2;
    } else if (o >= _base_1) {
      o += _offset_1;
    } else {
      o += _offset_0;
    }
    HeapShared::assert_in_loaded_heap(o);
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
    return true;
  }
};

int HeapShared::init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
                                    MemRegion& archive_space) {
  size_t total_bytes = 0;
  int num_loaded_regions = 0;
  for (int i = MetaspaceShared::first_archive_heap_region;
       i <= MetaspaceShared::last_archive_heap_region; i++) {
    FileMapRegion* r = mapinfo->space_at(i);
    r->assert_is_heap_region();
    if (r->used() > 0) {
      assert(is_aligned(r->used(), HeapWordSize), "must be");
      total_bytes += r->used();
      LoadedArchiveHeapRegion* ri = &loaded_regions[num_loaded_regions++];
      ri->_region_index = i;
      ri->_region_size = r->used();
      ri->_dumptime_base = (uintptr_t)mapinfo->start_address_as_decoded_from_archive(r);
    }
  }

  assert(is_aligned(total_bytes, HeapWordSize), "must be");
  size_t word_size = total_bytes / HeapWordSize;
  HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
  if (buffer == nullptr) {
    return 0;
  }

  archive_space = MemRegion(buffer, word_size);
  _loaded_heap_bottom = (uintptr_t)archive_space.start();
  _loaded_heap_top    = _loaded_heap_bottom + total_bytes;

  return num_loaded_regions;
}

void HeapShared::sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions,
                                     uintptr_t buffer) {
  // Find the relocation offset of the pointers in each region
  qsort(loaded_regions, num_loaded_regions, sizeof(LoadedArchiveHeapRegion),
        LoadedArchiveHeapRegion::comparator);

  uintptr_t p = buffer;
  for (int i = 0; i < num_loaded_regions; i++) {
    // This region will be loaded at p, so all objects inside this
    // region will be shifted by ri->_runtime_offset
    LoadedArchiveHeapRegion* ri = &loaded_regions[i];
    ri->_runtime_offset = p - ri->_dumptime_base;
    p += ri->_region_size;
  }
  assert(p == _loaded_heap_top, "must be");
}

bool HeapShared::load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
                              int num_loaded_regions, uintptr_t buffer) {
  uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
  if (bitmap_base == 0) {
    _loading_failed = true;
    return false; // OOM or CRC error
  }
  uintptr_t load_address = buffer;
  for (int i = 0; i < num_loaded_regions; i++) {
    LoadedArchiveHeapRegion* ri = &loaded_regions[i];
    FileMapRegion* r = mapinfo->space_at(ri->_region_index);

    if (!mapinfo->read_region(ri->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
      // There's no easy way to free the buffer, so we will fill it with dummy objects later
      // in fill_failed_loaded_region(), and it will eventually be GC'ed.
      log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", i);
      _loading_failed = true;
      return false;
    }
    log_info(cds)("Loaded heap region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
                  " size " SIZE_FORMAT_W(6) " delta " INTX_FORMAT,
                  ri->_region_index, load_address, load_address + ri->_region_size,
                  ri->_region_size, ri->_runtime_offset);

    uintptr_t oopmap = bitmap_base + r->oopmap_offset();
    BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());

    if (num_loaded_regions == 4) {
      PatchLoadedRegionPointers<4> patcher((narrowOop*)load_address, loaded_regions);
      bm.iterate(&patcher);
    } else if (num_loaded_regions == 3) {
      PatchLoadedRegionPointers<3> patcher((narrowOop*)load_address, loaded_regions);
      bm.iterate(&patcher);
    } else {
      assert(num_loaded_regions == 2, "must be");
      PatchLoadedRegionPointers<2> patcher((narrowOop*)load_address, loaded_regions);
      bm.iterate(&patcher);
    }

    load_address += r->used();
  }

  return true;
}

bool HeapShared::load_heap_regions(FileMapInfo* mapinfo) {
  init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());

  LoadedArchiveHeapRegion loaded_regions[MetaspaceShared::max_num_heap_regions];
  memset(loaded_regions, 0, sizeof(loaded_regions));

  MemRegion archive_space;
  int num_loaded_regions = init_loaded_regions(mapinfo, loaded_regions, archive_space);
  if (num_loaded_regions <= 0) {
    return false;
  }
  sort_loaded_regions(loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start());
  if (!load_regions(mapinfo, loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start())) {
    assert(_loading_failed, "must be");
    return false;
  }

  init_loaded_heap_relocation(loaded_regions, num_loaded_regions);
  _is_loaded = true;

  return true;
}

class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
  ResourceHashtable<uintptr_t, bool>* _table;

 public:
  VerifyLoadedHeapEmbeddedPointers(ResourceHashtable<uintptr_t, bool>* table) : _table(table) {}

  virtual void do_oop(narrowOop* p) {
    // This should be called before the loaded regions are modified, so all the embedded pointers
    // must be NULL, or must point to a valid object in the loaded regions.
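    // ("Valid object" here means the pointer decodes to the start address of one of
    //  the loaded objects recorded in _table by HeapShared::finish_initialization().)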
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      oop o = CompressedOops::decode_not_null(v);
      uintptr_t u = cast_from_oop<uintptr_t>(o);
      HeapShared::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived regions");
    }
  }
  virtual void do_oop(oop* p) {
    ShouldNotReachHere();
  }
};

void HeapShared::finish_initialization() {
  if (is_loaded()) {
    HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
    HeapWord* top    = (HeapWord*)_loaded_heap_top;

    MemRegion archive_space = MemRegion(bottom, top);
    Universe::heap()->complete_loaded_archive_space(archive_space);
  }

  if (VerifyArchivedFields <= 0 || !is_loaded()) {
    return;
  }

  log_info(cds, heap)("Verify all oops and pointers in loaded heap");

  ResourceMark rm;
  ResourceHashtable<uintptr_t, bool> table;
  VerifyLoadedHeapEmbeddedPointers verifier(&table);
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top    = (HeapWord*)_loaded_heap_top;

  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    table.put(cast_from_oop<uintptr_t>(o), true);
    p += o->size();
  }

  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&verifier);
    p += o->size();
  }
}

void HeapShared::fill_failed_loaded_region() {
  assert(_loading_failed, "must be");
  if (_loaded_heap_bottom != 0) {
    assert(_loaded_heap_top != 0, "must be");
    HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
    HeapWord* top    = (HeapWord*)_loaded_heap_top;
    Universe::heap()->fill_with_objects(bottom, top - bottom);
  }
}

#endif // INCLUDE_CDS_JAVA_HEAP