/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool HeapShared::_closed_archive_heap_region_mapped = false;
bool HeapShared::_open_archive_heap_region_mapped = false;
bool HeapShared::_archive_heap_region_fixed = false;
address HeapShared::_narrow_oop_base;
int HeapShared::_narrow_oop_shift;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;
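
// Note on the tables below: each ArchivableStaticFieldInfo entry names a boot
// class and a static reference field in it, e.g.:
//
//   {"java/lang/Integer$IntegerCache", "archivedCache"},
//
// Only the two name strings are specified in the tables; the resolved
// InstanceKlass* and the field offset (the klass and offset members) are
// filled in later by init_subgraph_entry_fields(), using the
// ArchivableStaticFieldFinder closure defined further below.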

//
// If you add new entries to the following tables, you should know what you're doing!
//

// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/Long$LongCache",                 "archivedCache"},
  {"java/lang/Byte$ByteCache",                 "archivedCache"},
  {"java/lang/Short$ShortCache",               "archivedCache"},
  {"java/lang/Character$CharacterCache",       "archivedCache"},
  {"java/util/jar/Attributes$Name",            "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",               "constantBaseLocales"},
};

// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleGraph"},
  {"java/util/ImmutableCollections",           "archivedObjects"},
  {"java/lang/ModuleLayer",                    "EMPTY_LAYER"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",           "archivedCaches"},
};

// Entry fields for subgraphs archived in the open archive heap region (full module graph).
static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {"jdk/internal/module/ArchivedBootLayer",    "archivedBootLayer"},
  {"java/lang/Module$ArchivedData",            "archivedData"},
};

const static int num_closed_archive_subgraph_entry_fields =
  sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_open_archive_subgraph_entry_fields =
  sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_fmg_open_archive_subgraph_entry_fields =
  sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);

GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL;
narrowOop HeapShared::_roots_narrow;
OopHandle HeapShared::_roots;

////////////////////////////////////////////////////////////////
//
// Java heap object archiving support
//
////////////////////////////////////////////////////////////////
void HeapShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
  if (is_mapped()) {
    _roots = OopHandle(Universe::vm_global(), decode_from_archive(_roots_narrow));
    if (!MetaspaceShared::use_full_module_graph()) {
      // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
      ClassLoaderDataShared::clear_archived_oops();
    }
  }
  SystemDictionaryShared::update_archived_mirror_native_pointers();
}

unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark().has_bias_pattern(),
         "this object should never have been locked");  // so identity_hash won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != NULL) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != NULL) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}

void HeapShared::reset_archived_object_states(TRAPS) {
  assert(DumpSharedSpaces, "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), CHECK);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), CHECK);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  oop* p = cache->get(obj);
  if (p != NULL) {
    return *p;
  } else {
    return NULL;
  }
}
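
// The dump-time cache above is keyed by the original oop (hashed with
// HeapShared::oop_hash) and maps it to its archived copy, so every object is
// copied at most once. A typical dump-time lookup sequence (this is the
// pattern used by archive_reachable_objects_from() below):
//
//   oop archived = HeapShared::find_archived_heap_object(obj);
//   if (archived == NULL) {
//     archived = HeapShared::archive_heap_object(obj); // copy + record in cache
//   }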

int HeapShared::append_root(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == NULL) {
    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  }

  return _pending_roots->append(obj);
}

// Returns the objArray that contains all the roots of the archived objects.
objArrayOop HeapShared::roots() {
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!is_heap_object_archiving_allowed()) {
      return NULL;
    }
  } else {
    assert(UseSharedSpaces, "must be");
  }

  objArrayOop roots = (objArrayOop)_roots.resolve();
  assert(roots != NULL, "should have been initialized");
  return roots;
}

void HeapShared::set_roots(narrowOop roots) {
  assert(UseSharedSpaces, "runtime only");
  assert(open_archive_heap_region_mapped(), "must be");
  _roots_narrow = roots;
}

// Returns the root oop at the given index in the archived roots array.
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    assert(_pending_roots != NULL, "sanity");
    return _pending_roots->at(index);
  } else {
    assert(UseSharedSpaces, "must be");
    assert(!_roots.is_empty(), "must have loaded shared heap");
    oop result = roots()->obj_at(index);
    if (clear) {
      clear_root(index);
    }
    return result;
  }
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(UseSharedSpaces, "must be");
  if (open_archive_heap_region_mapped()) {
    if (log_is_enabled(Debug, cds, heap)) {
      oop old = roots()->obj_at(index);
      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    roots()->obj_at_put(index, NULL);
  }
}
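
// How the roots array is used (an illustrative sketch, not code that runs
// here): at dump time, append_root() hands out a stable index for each
// archived root; that index is what gets written into the archive (e.g., in
// entry_field_records). At run time the same index retrieves the object:
//
//   int idx = HeapShared::append_root(obj);            // dump time
//   ...
//   oop o = HeapShared::get_root(idx, /*clear=*/true); // run time, one-shot
//
// Passing clear == true releases the root slot so the object can be GC'ed
// once the field it initializes has been set (see init_archived_fields_for).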

oop HeapShared::archive_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
    int hash_original = obj->identity_hash();
    if (UseCompactObjectHeaders) {
      markWord mark = obj->mark();
      if (mark.has_displaced_mark_helper()) {
        mark = mark.displaced_mark_helper();
      }
      narrowKlass nklass = mark.narrow_klass();
      archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original) LP64_ONLY(.set_narrow_klass(nklass)));
    } else {
      archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
    }
    assert(archived_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
    assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);

    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
    }
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_direct_exit(-1,
      err_msg("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
              SIZE_FORMAT "M", MaxHeapSize/M));
  }
  return archived_oop;
}
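
// Mark word illustration (a sketch of the invariant established above, not
// code that runs here): calling obj->identity_hash() before the copy forces
// a hash into the original's header, and copy_set_hash() installs the same
// value into the fresh prototype header of the copy, so afterwards
//
//   obj->identity_hash() == archived_oop->identity_hash()
//
// holds, as checked by the DEBUG_ONLY assert above.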

void HeapShared::archive_klass_objects() {
  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));

    // archive mirror object
    java_lang_Class::archive_mirror(k);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references();
    }
  }
}

void HeapShared::run_full_gc_in_vm_thread() {
  if (is_heap_object_archiving_allowed()) {
    // Avoid fragmentation while archiving heap objects.
    // We do this inside a safepoint, so that no further allocation can happen after GC
    // has finished.
    if (GCLocker::is_active()) {
      // Just checking for safety ...
      // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
      // has been modified such that JNI code is executed in some clean up threads after
      // we have finished class loading.
      log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
    } else {
      log_info(cds)("Run GC ...");
      Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
      log_info(cds)("Run GC done");
    }
  }
}

void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion>* closed,
                                           GrowableArray<MemRegion>* open) {

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                  p2i(CompressedOops::begin()), p2i(CompressedOops::end()));
    log_info(cds)("Dumping objects to closed archive heap region ...");
    copy_closed_archive_heap_objects(closed);

    log_info(cds)("Dumping objects to open archive heap region ...");
    copy_open_archive_heap_objects(open);

    destroy_archived_object_cache();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_archive_heap_objects(
                                    GrowableArray<MemRegion> * closed_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive(_dumped_interned_strings);

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           num_closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */,
                           false /* is_full_module_graph */);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_archive_heap_objects(
                                    GrowableArray<MemRegion> * open_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors();

  archive_klass_objects();

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           num_open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           false /* is_full_module_graph */);
  if (MetaspaceShared::use_full_module_graph()) {
    archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
                             num_fmg_open_archive_subgraph_entry_fields,
                             false /* is_closed_archive */,
                             true /* is_full_module_graph */);
    ClassLoaderDataShared::init_archived_oops();
  }

  copy_roots();

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}

// Copy _pending_roots into an objArray
void HeapShared::copy_roots() {
  int length = _pending_roots != NULL ? _pending_roots->length() : 0;
  int size = objArrayOopDesc::object_size(length);
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);

  memset(mem, 0, size * BytesPerWord);
  {
    // This is copied from MemAllocator::finish
    if (UseBiasedLocking) {
      oopDesc::set_mark(mem, k->prototype_header());
    } else if (UseCompactObjectHeaders) {
      oopDesc::release_set_mark(mem, k->prototype_header());
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
    }
    if (!UseCompactObjectHeaders) {
      oopDesc::release_set_klass(mem, k);
    }
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    roots()->obj_at_put(i, _pending_roots->at(i));
  }
  log_info(cds)("archived obj roots[%d] = %d words, klass = %p, obj = %p", length, size, k, mem);
}

void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
  assert(DumpSharedSpaces, "dump time only");
  bool created;
  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
  KlassSubGraphInfo* info =
    _dump_time_subgraph_info_table->put_if_absent(relocated_k, KlassSubGraphInfo(relocated_k, is_full_module_graph),
                                                  &created);
  assert(created, "must not initialize twice");
  return info;
}

KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
  assert(info != NULL, "must have been initialized");
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(
    int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
}
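
// Layout note (descriptive only): _subgraph_entry_fields is a flat int array
// of pairs, one pair per archived static field:
//
//   [ field_offset_0, root_index_0, field_offset_1, root_index_1, ... ]
//
// which is why readers of entry_field_records assert (length % 2 == 0) and
// step by 2; see init_archived_fields_for() and clear_archived_roots_of().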

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(orig_k);

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::current()->is_in_buffer_space(relocated_k), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
           "must be boot class");
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}

bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
  if (k->is_objArray_klass()) {
    k = ObjArrayKlass::cast(k)->bottom_klass();
  }
  if (k->is_instance_klass()) {
    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
      ResourceMark rm;
      log_info(cds, heap)("non-early: %s", k->external_name());
      return true;
    } else {
      return false;
    }
  } else {
    return false;
  }
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;
  _is_full_module_graph = info->is_full_module_graph();

  if (_is_full_module_graph) {
    // Consider all classes referenced by the full module graph as early -- we will be
    // allocating objects of these classes during JVMTI early phase, so they cannot
    // be processed by (non-early) JVMTI ClassFileLoadHook
    _has_non_early_klasses = false;
  } else {
    _has_non_early_klasses = info->has_non_early_klasses();
  }

  if (_has_non_early_klasses) {
    ResourceMark rm;
    log_info(cds, heap)(
      "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
      _k->external_name());
  }

  // populate the entry fields
  GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      ArchiveBuilder::new_ro_array<int>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
    }
  }

  ArchivePtrMarker::mark_pointer(&_k);
  ArchivePtrMarker::mark_pointer(&_entry_field_records);
  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)klass);
      u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);

  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}

static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 forces a GC to happen in such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC,  true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* THREAD) {
  if (!is_mapped()) {
    return; // nothing to do
  }
  resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields,
                                num_closed_archive_subgraph_entry_fields,
                                THREAD);
  resolve_classes_for_subgraphs(open_archive_subgraph_entry_fields,
                                num_open_archive_subgraph_entry_fields,
                                THREAD);
  resolve_classes_for_subgraphs(fmg_open_archive_subgraph_entry_fields,
                                num_fmg_open_archive_subgraph_entry_fields,
                                THREAD);
}

void HeapShared::resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[],
                                               int num, JavaThread* THREAD) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != NULL && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(k, THREAD);
  }
}

void HeapShared::resolve_classes_for_subgraph_of(Klass* k, JavaThread* THREAD) {
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == NULL) {
    clear_archived_roots_of(k);
  }
}

void HeapShared::initialize_from_archived_subgraph(Klass* k, JavaThread* THREAD) {
  if (!is_mapped()) {
    return; // nothing to do
  }

  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);

  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // None of the field values will be set if there was an exception when initializing the classes.
    // The Java code will not see any of the archived objects in the
    // subgraphs referenced from k in this case.
    return;
  }

  if (record != NULL) {
    init_archived_fields_for(k, record);
  }
}
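
// Illustrative call path (a sketch; the Java-side names come from the JDK
// class library and are not defined in this file):
// initialize_from_archived_subgraph() is typically reached when a boot class
// with archived static fields runs its static initializer and calls
// jdk.internal.misc.VM.initializeFromArchive(), e.g. in
// java.lang.Integer$IntegerCache:
//
//   // Java (simplified):
//   static {
//     VM.initializeFromArchive(IntegerCache.class);
//     if (archivedCache == null) { /* compute the cache the slow way */ }
//   }
//
// If the subgraph record is usable, the archived value is installed into the
// static field before the null check, so the slow path is skipped.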

const ArchivedKlassSubGraphInfoRecord*
HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  if (!k->is_shared()) {
    return NULL;
  }
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
                            k->external_name());
      }
      return NULL;
    }

    if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
                            k->external_name());
      }
      return NULL;
    }

    resolve_or_init(k, do_init, CHECK_NULL);

    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* klass = klasses->at(i);
        if (!klass->is_shared()) {
          return NULL;
        }
        resolve_or_init(klass, do_init, CHECK_NULL);
      }
    }
  }

  return record;
}

void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    if (k->class_loader_data() == NULL) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}

void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
  verify_the_heap(k, "before");

  // Load the subgraph entry fields from the record and store them back to
  // the corresponding fields within the mirror.
  oop m = k->java_mirror();
  Array<int>* entry_field_records = record->entry_field_records();
  if (entry_field_records != NULL) {
    int efr_len = entry_field_records->length();
    assert(efr_len % 2 == 0, "sanity");
    for (int i = 0; i < efr_len; i += 2) {
      int field_offset = entry_field_records->at(i);
      int root_index = entry_field_records->at(i+1);
      oop v = get_root(root_index, /*clear=*/true);
      m->obj_field_put(field_offset, v);
      log_debug(cds, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
    }

    // Done. Java code can see the archived sub-graphs referenced from k's
    // mirror after this point.
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
" (early)" : ""); 859 } 860 } 861 862 verify_the_heap(k, "after "); 863 } 864 865 void HeapShared::clear_archived_roots_of(Klass* k) { 866 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k); 867 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0); 868 if (record != NULL) { 869 Array<int>* entry_field_records = record->entry_field_records(); 870 if (entry_field_records != NULL) { 871 int efr_len = entry_field_records->length(); 872 assert(efr_len % 2 == 0, "sanity"); 873 for (int i = 0; i < efr_len; i += 2) { 874 int root_index = entry_field_records->at(i+1); 875 clear_root(root_index); 876 } 877 } 878 } 879 } 880 881 class WalkOopAndArchiveClosure: public BasicOopIterateClosure { 882 int _level; 883 bool _is_closed_archive; 884 bool _record_klasses_only; 885 KlassSubGraphInfo* _subgraph_info; 886 oop _orig_referencing_obj; 887 oop _archived_referencing_obj; 888 public: 889 WalkOopAndArchiveClosure(int level, 890 bool is_closed_archive, 891 bool record_klasses_only, 892 KlassSubGraphInfo* subgraph_info, 893 oop orig, oop archived) : 894 _level(level), _is_closed_archive(is_closed_archive), 895 _record_klasses_only(record_klasses_only), 896 _subgraph_info(subgraph_info), 897 _orig_referencing_obj(orig), _archived_referencing_obj(archived) {} 898 void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 899 void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 900 901 protected: 902 template <class T> void do_oop_work(T *p) { 903 oop obj = RawAccess<>::oop_load(p); 904 if (!CompressedOops::is_null(obj)) { 905 assert(!HeapShared::is_archived_object(obj), 906 "original objects must not point to archived objects"); 907 908 size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char)); 909 T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta); 910 911 if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) { 912 ResourceMark rm; 913 log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level, 914 _orig_referencing_obj->klass()->external_name(), field_delta, 915 p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name()); 916 LogTarget(Trace, cds, heap) log; 917 LogStream out(log); 918 obj->print_on(&out); 919 } 920 921 oop archived = HeapShared::archive_reachable_objects_from( 922 _level + 1, _subgraph_info, obj, _is_closed_archive); 923 assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1"); 924 assert(HeapShared::is_archived_object(archived), "must be"); 925 926 if (!_record_klasses_only) { 927 // Update the reference in the archived copy of the referencing object. 
        log_debug(cds, heap)("(%d) updating oop @[" PTR_FORMAT "] " PTR_FORMAT " ==> " PTR_FORMAT,
                             _level, p2i(new_p), p2i(obj), p2i(archived));
        RawAccess<IS_NOT_NULL>::oop_store(new_p, archived);
      }
    }
  }
};
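
// Worked example of the pointer fix-up above (descriptive only): if p points
// at a reference field 24 bytes into the original object, then
//
//   field_delta == 24
//   new_p       == cast_from_oop<address>(_archived_referencing_obj) + 24
//
// i.e., the same field slot in the archived copy, which is then overwritten
// with the address of the archived referent.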

void HeapShared::check_closed_archive_heap_region_object(InstanceKlass* k) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && is_reference_type(ft)) {
        ResourceMark rm;
        log_warning(cds, heap)(
          "Please check reference field in %s instance in closed archive heap region: %s %s",
          k->external_name(), (fs.name())->as_C_string(),
          (fs.signature())->as_C_string());
      }
    }
  }
}

void HeapShared::check_module_oop(oop orig_module_obj) {
  assert(DumpSharedSpaces, "must be");
  assert(java_lang_Module::is_instance(orig_module_obj), "must be");
  ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj);
  if (orig_module_ent == NULL) {
    // These special Module objects are created in Java code. They are not
    // defined via Modules::define_module(), so they don't have a ModuleEntry:
    //     java.lang.Module::ALL_UNNAMED_MODULE
    //     java.lang.Module::EVERYONE_MODULE
    //     jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule
    assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed");
    log_info(cds, heap)("Module oop with no ModuleEntry* @[" PTR_FORMAT "]", p2i(orig_module_obj));
  } else {
    ClassLoaderData* loader_data = orig_module_ent->loader_data();
    assert(loader_data->is_builtin_class_loader_data(), "must be");
  }
}


// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object(orig_obj), "sanity");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    vm_direct_exit(1);
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_direct_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_heap_object(orig_obj);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %d, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_direct_exit(1);
      }
    }

    if (java_lang_Module::is_instance(orig_obj)) {
      check_module_oop(orig_obj);
      java_lang_Module::set_module_entry(archived_obj, NULL);
      java_lang_Module::set_loader(archived_obj, NULL);
    } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
      // class_data will be restored explicitly at run time.
      guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
                orig_obj == SystemDictionary::java_system_loader() ||
                java_lang_ClassLoader::loader_data_raw(orig_obj) == NULL, "must be");
      java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_archive_heap_region_object(InstanceKlass::cast(orig_k));
  }
  return archived_obj;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
//
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. Mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
// within a Class instance (java mirror). If the static field is a
// reference field and points to a non-null java object, proceed to
// the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
// current object already exists, updates the pointer in the archived
// copy of the referencing object to point to the current archived object.
// Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
// archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
// point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
// for loading and initializing before any object in the archived graph can
// be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
  }
}
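
// Worked example (descriptive only, using the first entry of
// closed_archive_subgraph_entry_fields above): archiving
// java.lang.Integer$IntegerCache::archivedCache starts at the Integer[]
// held by that static field, copies the array, then recursively copies each
// Integer element it references. Along the way, the klasses of the array and
// its elements are recorded via add_subgraph_object_klass(), and finally the
// (field_offset, root_index) pair for archivedCache is recorded via
// add_subgraph_entry_field().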

#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_subgraph_info(k, is_full_module_graph);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      assert(is_reference_type(fd->field_type()), "can archive only fields that are references");
      _found = true;
      _offset = fd->offset();
    }
  }
  bool found() { return _found; }
  int offset() { return _offset; }
};

void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            int num, TRAPS) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);

    Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, CHECK);
    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(),
           "Only support boot classes");
    ik->initialize(CHECK);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(TRAPS) {
  assert(is_heap_object_archiving_allowed(), "Sanity check");
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();
  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
                             num_closed_archive_subgraph_entry_fields,
                             CHECK);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
                             num_open_archive_subgraph_entry_fields,
                             CHECK);
  if (MetaspaceShared::use_full_module_graph()) {
    init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields,
                               num_fmg_open_archive_subgraph_entry_fields,
                               CHECK);
  }
}

void HeapShared::init_for_dumping(TRAPS) {
  if (is_heap_object_archiving_allowed()) {
    _dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
    init_subgraph_entry_fields(CHECK);
  }
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          int num, bool is_closed_archive,
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num; ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num; i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name,
                                                  is_closed_archive);
    }
    done_recording_subgraph(info->klass, klass_name);
  }
"closed" : "open", 1363 _num_total_subgraph_recordings); 1364 log_info(cds, heap)(" Walked %d objects", _num_total_walked_objs); 1365 log_info(cds, heap)(" Archived %d objects", _num_total_archived_objs); 1366 log_info(cds, heap)(" Recorded %d klasses", _num_total_recorded_klasses); 1367 1368 #ifndef PRODUCT 1369 for (int i = 0; i < num; i++) { 1370 ArchivableStaticFieldInfo* f = &fields[i]; 1371 verify_subgraph_from_static_field(f->klass, f->offset); 1372 } 1373 log_info(cds, heap)(" Verified %d references", _num_total_verifications); 1374 #endif 1375 } 1376 1377 // Not all the strings in the global StringTable are dumped into the archive, because 1378 // some of those strings may be only referenced by classes that are excluded from 1379 // the archive. We need to explicitly mark the strings that are: 1380 // [1] used by classes that WILL be archived; 1381 // [2] included in the SharedArchiveConfigFile. 1382 void HeapShared::add_to_dumped_interned_strings(oop string) { 1383 assert_at_safepoint(); // DumpedInternedStrings uses raw oops 1384 bool created; 1385 _dumped_interned_strings->put_if_absent(string, true, &created); 1386 } 1387 1388 // At dump-time, find the location of all the non-null oop pointers in an archived heap 1389 // region. This way we can quickly relocate all the pointers without using 1390 // BasicOopIterateClosure at runtime. 1391 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure { 1392 narrowOop* _start; 1393 BitMap *_oopmap; 1394 int _num_total_oops; 1395 int _num_null_oops; 1396 public: 1397 FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap) 1398 : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {} 1399 1400 virtual void do_oop(narrowOop* p) { 1401 _num_total_oops ++; 1402 narrowOop v = *p; 1403 if (!CompressedOops::is_null(v)) { 1404 size_t idx = p - _start; 1405 _oopmap->set_bit(idx); 1406 } else { 1407 _num_null_oops ++; 1408 } 1409 } 1410 virtual void do_oop(oop *p) { 1411 ShouldNotReachHere(); 1412 } 1413 int num_total_oops() const { return _num_total_oops; } 1414 int num_null_oops() const { return _num_null_oops; } 1415 }; 1416 1417 ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) { 1418 assert(UseCompressedOops, "must be"); 1419 size_t num_bits = region.byte_size() / sizeof(narrowOop); 1420 ResourceBitMap oopmap(num_bits); 1421 1422 HeapWord* p = region.start(); 1423 HeapWord* end = region.end(); 1424 FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap); 1425 ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL; 1426 1427 int num_objs = 0; 1428 while (p < end) { 1429 oop o = cast_to_oop(p); 1430 o->oop_iterate(&finder); 1431 p += o->size(); 1432 if (DumpSharedSpaces) { 1433 builder->relocate_klass_ptr(o); 1434 } 1435 ++ num_objs; 1436 } 1437 1438 log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d", 1439 num_objs, finder.num_total_oops(), finder.num_null_oops()); 1440 return oopmap; 1441 } 1442 1443 // Patch all the embedded oop pointers inside an archived heap region, 1444 // to be consistent with the runtime oop encoding. 

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP