/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool HeapShared::_closed_archive_heap_region_mapped = false;
bool HeapShared::_open_archive_heap_region_mapped = false;
bool HeapShared::_archive_heap_region_fixed = false;
address HeapShared::_narrow_oop_base;
int HeapShared::_narrow_oop_shift;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = NULL;

//
// If you add new entries to the following tables, you should know what you're doing!
//
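// Each entry below names a boot class (in JVM internal "slash" form) and one
// of its static reference fields. At dump time, the object graph reachable
// from each such field is copied into an archive heap region; at runtime the
// field is re-initialized from the archived copy. For example, the entry
// {"java/lang/Integer$IntegerCache", "archivedCache"} corresponds to a field
// roughly like the following on the Java side (illustrative sketch only; the
// actual declaration lives in the JDK class library):
//
//     private static Integer[] archivedCache;  // set from the CDS archive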
// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache",           "archivedCache"},
  {"java/lang/Long$LongCache",                 "archivedCache"},
  {"java/lang/Byte$ByteCache",                 "archivedCache"},
  {"java/lang/Short$ShortCache",               "archivedCache"},
  {"java/lang/Character$CharacterCache",       "archivedCache"},
  {"java/util/jar/Attributes$Name",            "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale",               "constantBaseLocales"},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/module/ArchivedModuleGraph",  "archivedModuleGraph"},
  {"java/util/ImmutableCollections",           "archivedObjects"},
  {"java/lang/ModuleLayer",                    "EMPTY_LAYER"},
  {"java/lang/module/Configuration",           "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger",           "archivedCaches"},
};

// Entry fields for subgraphs archived in the open archive heap region (full module graph).
static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {"jdk/internal/module/ArchivedBootLayer",    "archivedBootLayer"},
  {"java/lang/Module$ArchivedData",            "archivedData"},
};

const static int num_closed_archive_subgraph_entry_fields =
  sizeof(closed_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_open_archive_subgraph_entry_fields =
  sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
const static int num_fmg_open_archive_subgraph_entry_fields =
  sizeof(fmg_open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);

GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = NULL;
narrowOop HeapShared::_roots_narrow;
OopHandle HeapShared::_roots;

////////////////////////////////////////////////////////////////
//
// Java heap object archiving support
//
////////////////////////////////////////////////////////////////
void HeapShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
  if (is_mapped()) {
    _roots = OopHandle(Universe::vm_global(), decode_from_archive(_roots_narrow));
    if (!MetaspaceShared::use_full_module_graph()) {
      // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
      ClassLoaderDataShared::clear_archived_oops();
    }
  }
  SystemDictionaryShared::update_archived_mirror_native_pointers();
}

unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark().has_bias_pattern(),
         "this object should never have been locked");  // so identity_hash won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != NULL) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != NULL) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}

void HeapShared::reset_archived_object_states(TRAPS) {
  assert(DumpSharedSpaces, "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), CHECK);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), CHECK);
}
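// reset_states() above walks the superclass chain and invokes any private,
// no-arg resetArchivedStates() method it finds, so each class in the
// hierarchy can clear state that must not leak into the archive. A minimal
// sketch of such a method on the Java side (illustrative only; the real
// implementations live in the JDK class library):
//
//     private void resetArchivedStates() {
//         // drop caches, native handles, etc. that are invalid at runtime
//     }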
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  oop* p = cache->get(obj);
  if (p != NULL) {
    return *p;
  } else {
    return NULL;
  }
}

int HeapShared::append_root(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  // No GC should happen since we aren't scanning _pending_roots.
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == NULL) {
    _pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
  }

  return _pending_roots->append(obj);
}

// Returns the objArray that contains all the roots of the archived objects
objArrayOop HeapShared::roots() {
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    if (!is_heap_object_archiving_allowed()) {
      return NULL;
    }
  } else {
    assert(UseSharedSpaces, "must be");
  }

  objArrayOop roots = (objArrayOop)_roots.resolve();
  assert(roots != NULL, "should have been initialized");
  return roots;
}

void HeapShared::set_roots(narrowOop roots) {
  assert(UseSharedSpaces, "runtime only");
  assert(open_archive_heap_region_mapped(), "must be");
  _roots_narrow = roots;
}

// Returns the root oop at the given index of the archived roots array
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  if (DumpSharedSpaces) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
    assert(_pending_roots != NULL, "sanity");
    return _pending_roots->at(index);
  } else {
    assert(UseSharedSpaces, "must be");
    assert(!_roots.is_empty(), "must have loaded shared heap");
    oop result = roots()->obj_at(index);
    if (clear) {
      clear_root(index);
    }
    return result;
  }
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(UseSharedSpaces, "must be");
  if (open_archive_heap_region_mapped()) {
    if (log_is_enabled(Debug, cds, heap)) {
      oop old = roots()->obj_at(index);
      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    roots()->obj_at_put(index, NULL);
  }
}
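// Usage note: get_root(index, /*clear=*/true) is the pattern used when an
// archived root is consumed exactly once during startup -- e.g. when an
// entry field is restored into a class mirror in init_archived_fields_for()
// -- so that the roots array no longer keeps the object alive once the
// mirror field holds it.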
oop HeapShared::archive_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  oop archived_oop = cast_to_oop(G1CollectedHeap::heap()->archive_mem_allocate(len));
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
    // Reinitialize markword to remove age/marking/locking/etc.
    //
    // We need to retain the identity_hash, because it may have been used by some hashtables
    // in the shared heap. This also has the side effect of pre-initializing the
    // identity_hash for all shared objects, so they are less likely to be written
    // into during run time, increasing the potential of memory sharing.
    int hash_original = obj->identity_hash();
    archived_oop->set_mark(markWord::prototype().copy_set_hash(hash_original));
    assert(archived_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(int hash_archived = archived_oop->identity_hash());
    assert(hash_original == hash_archived, "Different hash codes: original %x, archived %x", hash_original, hash_archived);

    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT " : %s",
                           p2i(obj), p2i(archived_oop), obj->klass()->external_name());
    }
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_direct_exit(-1,
      err_msg("Out of memory. Please run with a larger Java heap, current MaxHeapSize = "
              SIZE_FORMAT "M", MaxHeapSize/M));
  }
  return archived_oop;
}

void HeapShared::archive_klass_objects() {
  GrowableArray<Klass*>* klasses = ArchiveBuilder::current()->klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));

    // archive mirror object
    java_lang_Class::archive_mirror(k);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references();
    }
  }
}

void HeapShared::run_full_gc_in_vm_thread() {
  if (is_heap_object_archiving_allowed()) {
    // Avoid fragmentation while archiving heap objects.
    // We do this inside a safepoint, so that no further allocation can happen after GC
    // has finished.
    if (GCLocker::is_active()) {
      // Just checking for safety ...
      // This should not happen during -Xshare:dump. If you see this, probably the Java core lib
      // has been modified such that JNI code is executed in some clean up threads after
      // we have finished class loading.
      log_warning(cds)("GC locker is held, unable to start extra compacting GC. This may produce suboptimal results.");
    } else {
      log_info(cds)("Run GC ...");
      Universe::heap()->collect_as_vm_thread(GCCause::_archive_time_gc);
      log_info(cds)("Run GC done");
    }
  }
}

void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion>* closed,
                                           GrowableArray<MemRegion>* open) {

  G1HeapVerifier::verify_ready_for_archiving();

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                  p2i(CompressedOops::begin()), p2i(CompressedOops::end()));
    log_info(cds)("Dumping objects to closed archive heap region ...");
    copy_closed_archive_heap_objects(closed);

    log_info(cds)("Dumping objects to open archive heap region ...");
    copy_open_archive_heap_objects(open);

    destroy_archived_object_cache();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_archive_heap_objects(
                                    GrowableArray<MemRegion> * closed_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive(_dumped_interned_strings);

  archive_object_subgraphs(closed_archive_subgraph_entry_fields,
                           num_closed_archive_subgraph_entry_fields,
                           true /* is_closed_archive */,
                           false /* is_full_module_graph */);

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_archive_heap_objects(
                                    GrowableArray<MemRegion> * open_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors();

  archive_klass_objects();

  archive_object_subgraphs(open_archive_subgraph_entry_fields,
                           num_open_archive_subgraph_entry_fields,
                           false /* is_closed_archive */,
                           false /* is_full_module_graph */);
  if (MetaspaceShared::use_full_module_graph()) {
    archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
                             num_fmg_open_archive_subgraph_entry_fields,
                             false /* is_closed_archive */,
                             true /* is_full_module_graph */);
    ClassLoaderDataShared::init_archived_oops();
  }

  copy_roots();

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}
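// Note on the two region kinds used above: objects in the closed region must
// not have reference fields assigned at runtime (see the warning next to
// closed_archive_subgraph_entry_fields), while the open region holds objects
// that may be written to after mapping, such as class mirrors and the roots
// array created by copy_roots() below.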
// Copy _pending_roots into an objArray
void HeapShared::copy_roots() {
  int length = _pending_roots != NULL ? _pending_roots->length() : 0;
  int size = objArrayOopDesc::object_size(length);
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = G1CollectedHeap::heap()->archive_mem_allocate(size);

  memset(mem, 0, size * BytesPerWord);
  {
    // This is copied from MemAllocator::finish
    if (UseBiasedLocking) {
      oopDesc::set_mark(mem, k->prototype_header());
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
    }
    oopDesc::release_set_klass(mem, k);
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  _roots = OopHandle(Universe::vm_global(), cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    roots()->obj_at_put(i, _pending_roots->at(i));
  }
  log_info(cds)("archived obj roots[%d] = %d words, klass = %p, obj = %p", length, size, k, mem);
}

void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the relocated
// Klass* of the original k.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
  assert(DumpSharedSpaces, "dump time only");
  bool created;
  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
  KlassSubGraphInfo* info =
    _dump_time_subgraph_info_table->put_if_absent(relocated_k, KlassSubGraphInfo(relocated_k, is_full_module_graph),
                                                  &created);
  assert(created, "must not initialize twice");
  return info;
}

KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
  assert(info != NULL, "must have been initialized");
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(
    int static_field_offset, oop v, bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  if (_subgraph_entry_fields == NULL) {
    _subgraph_entry_fields =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(DumpSharedSpaces, "dump time only");
  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(orig_k);

  if (_subgraph_object_klasses == NULL) {
    _subgraph_object_klasses =
      new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  assert(ArchiveBuilder::current()->is_in_buffer_space(relocated_k), "must be a shared class");

  if (_k == relocated_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (relocated_k->is_instance_klass()) {
    assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(),
           "must be boot class");
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
  } else if (relocated_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(relocated_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
    }
    if (relocated_k == Universe::objectArrayKlassObj()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(relocated_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(relocated_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(relocated_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}

bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
  if (k->is_objArray_klass()) {
    k = ObjArrayKlass::cast(k)->bottom_klass();
  }
  if (k->is_instance_klass()) {
    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
      ResourceMark rm;
      log_info(cds, heap)("non-early: %s", k->external_name());
      return true;
    } else {
      return false;
    }
  } else {
    return false;
  }
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = info->klass();
  _entry_field_records = NULL;
  _subgraph_object_klasses = NULL;
  _is_full_module_graph = info->is_full_module_graph();

  if (_is_full_module_graph) {
    // Consider all classes referenced by the full module graph as early -- we will be
    // allocating objects of these classes during JVMTI early phase, so they cannot
    // be processed by (non-early) JVMTI ClassFileLoadHook
    _has_non_early_klasses = false;
  } else {
    _has_non_early_klasses = info->has_non_early_klasses();
  }

  if (_has_non_early_klasses) {
    ResourceMark rm;
    log_info(cds, heap)(
      "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
      _k->external_name());
  }

  // populate the entry fields
  GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != NULL) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      ArchiveBuilder::new_ro_array<int>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // the Klasses of the objects in the sub-graphs
  GrowableArray<Klass*>* subgraph_object_klasses = info->subgraph_object_klasses();
  if (subgraph_object_klasses != NULL) {
    int num_subgraphs_klasses = subgraph_object_klasses->length();
    _subgraph_object_klasses =
      ArchiveBuilder::new_ro_array<Klass*>(num_subgraphs_klasses);
    for (int i = 0; i < num_subgraphs_klasses; i++) {
      Klass* subgraph_k = subgraph_object_klasses->at(i);
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        log_info(cds, heap)(
          "Archived object klass %s (%2d) => %s",
          _k->external_name(), i, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(i, subgraph_k);
      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(i));
    }
  }

  ArchivePtrMarker::mark_pointer(&_k);
  ArchivePtrMarker::mark_pointer(&_entry_field_records);
  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
}

struct CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    if (info.subgraph_object_klasses() != NULL || info.subgraph_entry_fields() != NULL) {
      ArchivedKlassSubGraphInfoRecord* record =
        (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
      record->init(&info);

      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)klass);
      u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};
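// For reference, each ArchivedKlassSubGraphInfoRecord produced above stores
// its entry fields as a flat int array of (field_offset, root_index) pairs:
//
//     _entry_field_records = { off_0, root_0, off_1, root_1, ... }
//
// where off_i is the offset of a static field within the class mirror and
// root_i is an index into HeapShared::roots(). init_archived_fields_for()
// walks this array two ints at a time, which is why the length is asserted
// to be even.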
// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, value and is_closed_archive flag are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);

  writer.dump(&_run_time_subgraph_info_table, "subgraphs");
}

void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
  _run_time_subgraph_info_table.serialize_header(soc);
}

static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 forces a GC to happen in such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC,  true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}
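// Illustrative use of the checks above (shown as an assumed example
// invocation; flag availability depends on the build):
//
//     java -XX:VerifyArchivedFields=2 -Xshare:on -version
//
// VerifyArchivedFields=1 verifies the heap before and after each subgraph
// initialization; VerifyArchivedFields=2 additionally forces a GC while some
// archived klasses may not have been loaded yet, to check for GC safety.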
// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* THREAD) {
  if (!is_mapped()) {
    return; // nothing to do
  }
  resolve_classes_for_subgraphs(closed_archive_subgraph_entry_fields,
                                num_closed_archive_subgraph_entry_fields,
                                THREAD);
  resolve_classes_for_subgraphs(open_archive_subgraph_entry_fields,
                                num_open_archive_subgraph_entry_fields,
                                THREAD);
  resolve_classes_for_subgraphs(fmg_open_archive_subgraph_entry_fields,
                                num_fmg_open_archive_subgraph_entry_fields,
                                THREAD);
}

void HeapShared::resolve_classes_for_subgraphs(ArchivableStaticFieldInfo fields[],
                                               int num, JavaThread* THREAD) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != NULL && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(k, THREAD);
  }
}

void HeapShared::resolve_classes_for_subgraph_of(Klass* k, JavaThread* THREAD) {
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == NULL) {
    clear_archived_roots_of(k);
  }
}

void HeapShared::initialize_from_archived_subgraph(Klass* k, JavaThread* THREAD) {
  if (!is_mapped()) {
    return; // nothing to do
  }

  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);

  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // None of the field values will be set if there was an exception when initializing the classes.
    // The java code will not see any of the archived objects in the
    // subgraphs referenced from k in this case.
    return;
  }

  if (record != NULL) {
    init_archived_fields_for(k, record);
  }
}

const ArchivedKlassSubGraphInfoRecord*
HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

  if (!k->is_shared()) {
    return NULL;
  }
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record != NULL) {
    if (record->is_full_module_graph() && !MetaspaceShared::use_full_module_graph()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
                            k->external_name());
      }
      return NULL;
    }

    if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
                            k->external_name());
      }
      return NULL;
    }

    resolve_or_init(k, do_init, CHECK_NULL);

    // Load/link/initialize the klasses of the objects in the subgraph.
    // NULL class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != NULL) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* klass = klasses->at(i);
        if (!klass->is_shared()) {
          return NULL;
        }
        resolve_or_init(klass, do_init, CHECK_NULL);
      }
    }
  }

  return record;
}

void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    if (k->class_loader_data() == NULL) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != NULL, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}
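// The two-pass protocol implemented above: resolve_classes() runs once
// during VM startup with do_init=false, giving every klass reachable from
// the archived subgraphs a valid class_loader_data before any GC can walk
// the mapped regions (see the comment above resolve_classes()). Class
// initialization time calls then come through
// initialize_from_archived_subgraph() with do_init=true, which also runs
// InstanceKlass::initialize() for the recorded subgraph klasses.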
" (early)" : ""); 845 } 846 } 847 848 verify_the_heap(k, "after "); 849 } 850 851 void HeapShared::clear_archived_roots_of(Klass* k) { 852 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k); 853 const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0); 854 if (record != NULL) { 855 Array<int>* entry_field_records = record->entry_field_records(); 856 if (entry_field_records != NULL) { 857 int efr_len = entry_field_records->length(); 858 assert(efr_len % 2 == 0, "sanity"); 859 for (int i = 0; i < efr_len; i += 2) { 860 int root_index = entry_field_records->at(i+1); 861 clear_root(root_index); 862 } 863 } 864 } 865 } 866 867 class WalkOopAndArchiveClosure: public BasicOopIterateClosure { 868 int _level; 869 bool _is_closed_archive; 870 bool _record_klasses_only; 871 KlassSubGraphInfo* _subgraph_info; 872 oop _orig_referencing_obj; 873 oop _archived_referencing_obj; 874 public: 875 WalkOopAndArchiveClosure(int level, 876 bool is_closed_archive, 877 bool record_klasses_only, 878 KlassSubGraphInfo* subgraph_info, 879 oop orig, oop archived) : 880 _level(level), _is_closed_archive(is_closed_archive), 881 _record_klasses_only(record_klasses_only), 882 _subgraph_info(subgraph_info), 883 _orig_referencing_obj(orig), _archived_referencing_obj(archived) {} 884 void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 885 void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } 886 887 protected: 888 template <class T> void do_oop_work(T *p) { 889 oop obj = RawAccess<>::oop_load(p); 890 if (!CompressedOops::is_null(obj)) { 891 assert(!HeapShared::is_archived_object(obj), 892 "original objects must not point to archived objects"); 893 894 size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char)); 895 T* new_p = (T*)(cast_from_oop<address>(_archived_referencing_obj) + field_delta); 896 897 if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) { 898 ResourceMark rm; 899 log_debug(cds, heap)("(%d) %s[" SIZE_FORMAT "] ==> " PTR_FORMAT " size %d %s", _level, 900 _orig_referencing_obj->klass()->external_name(), field_delta, 901 p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name()); 902 LogTarget(Trace, cds, heap) log; 903 LogStream out(log); 904 obj->print_on(&out); 905 } 906 907 oop archived = HeapShared::archive_reachable_objects_from( 908 _level + 1, _subgraph_info, obj, _is_closed_archive); 909 assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1"); 910 assert(HeapShared::is_archived_object(archived), "must be"); 911 912 if (!_record_klasses_only) { 913 // Update the reference in the archived copy of the referencing object. 
void HeapShared::check_closed_archive_heap_region_object(InstanceKlass* k) {
  // Check fields in the object
  for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
    if (!fs.access_flags().is_static()) {
      BasicType ft = fs.field_descriptor().field_type();
      if (!fs.access_flags().is_final() && is_reference_type(ft)) {
        ResourceMark rm;
        log_warning(cds, heap)(
          "Please check reference field in %s instance in closed archive heap region: %s %s",
          k->external_name(), (fs.name())->as_C_string(),
          (fs.signature())->as_C_string());
      }
    }
  }
}

void HeapShared::check_module_oop(oop orig_module_obj) {
  assert(DumpSharedSpaces, "must be");
  assert(java_lang_Module::is_instance(orig_module_obj), "must be");
  ModuleEntry* orig_module_ent = java_lang_Module::module_entry_raw(orig_module_obj);
  if (orig_module_ent == NULL) {
    // These special Module objects are created in Java code. They are not
    // defined via Modules::define_module(), so they don't have a ModuleEntry:
    //     java.lang.Module::ALL_UNNAMED_MODULE
    //     java.lang.Module::EVERYONE_MODULE
    //     jdk.internal.loader.ClassLoaders$BootClassLoader::unnamedModule
    assert(java_lang_Module::name(orig_module_obj) == NULL, "must be unnamed");
    log_info(cds, heap)("Module oop with No ModuleEntry* @[" PTR_FORMAT "]", p2i(orig_module_obj));
  } else {
    ClassLoaderData* loader_data = orig_module_ent->loader_data();
    assert(loader_data->is_builtin_class_loader_data(), "must be");
  }
}
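// Usage note for check_module_oop(): at dump time, every archived
// java.lang.Module must either be one of the special unnamed modules created
// purely in Java code (no ModuleEntry), or belong to a builtin class loader.
// archive_reachable_objects_from() below also nulls out the module_entry and
// loader fields of the archived copy so they can be restored at runtime.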

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level,
                                               KlassSubGraphInfo* subgraph_info,
                                               oop orig_obj,
                                               bool is_closed_archive) {
  assert(orig_obj != NULL, "must be");
  assert(!is_archived_object(orig_obj), "sanity");

  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object of class %s", orig_obj->klass()->external_name());
    vm_direct_exit(1);
  }

  // java.lang.Class instances cannot be included in an archived object sub-graph. We only support
  // them as Klass::_archived_mirror because they need to be specially restored at run time.
  //
  // If you get an error here, you probably made a change in the JDK library that has added a Class
  // object that is referenced (directly or indirectly) by static fields.
  if (java_lang_Class::is_instance(orig_obj)) {
    log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
    vm_direct_exit(1);
  }

  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.
    return archived_obj;
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return archived_obj;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
    archived_obj = archive_heap_object(orig_obj);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %d, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return NULL;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        vm_direct_exit(1);
      }
    }

    if (java_lang_Module::is_instance(orig_obj)) {
      check_module_oop(orig_obj);
      java_lang_Module::set_module_entry(archived_obj, NULL);
      java_lang_Module::set_loader(archived_obj, NULL);
    } else if (java_lang_ClassLoader::is_instance(orig_obj)) {
      // class_data will be restored explicitly at run time.
      guarantee(orig_obj == SystemDictionary::java_platform_loader() ||
                orig_obj == SystemDictionary::java_system_loader() ||
                java_lang_ClassLoader::loader_data_raw(orig_obj) == NULL, "must be");
      java_lang_ClassLoader::release_set_loader_data(archived_obj, NULL);
    }
  }

  assert(archived_obj != NULL, "must be");
  Klass *orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                  subgraph_info, orig_obj, archived_obj);
  orig_obj->oop_iterate(&walker);
  if (is_closed_archive && orig_k->is_instance_klass()) {
    check_closed_archive_heap_region_object(InstanceKlass::cast(orig_k));
  }
  return archived_obj;
}
//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot class only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. Mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see
// WalkOopAndArchiveClosure):
//
// 1) Java object sub-graph archiving starts from a given static field
// within a Class instance (java mirror). If the static field is a
// reference field and points to a non-null java object, proceed to
// the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
// current object already exists, updates the pointer in the archived
// copy of the referencing object to point to the current archived object.
// Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
// archive the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of referencing object to
// point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
// for loading and initializing before any object in the archived graph can
// be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name,
                                                             bool is_closed_archive) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    oop af = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);

    if (af == NULL) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, af, is_closed_archive);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(af));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, NULL, false);
  }
}
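// Worked example (illustrative): for the closed-archive entry
// Integer$IntegerCache.archivedCache, archiving starts at level 1 with the
// Integer[] array referenced by the static field; level 2 then archives each
// Integer element reachable from the array. Both Integer[] and Integer are
// recorded in the KlassSubGraphInfo, so they can be resolved (and
// initialized) before the field is restored at runtime.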
#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 private:
  bool _is_archived;

 public:
  VerifySharedOopClosure(bool is_archived) : _is_archived(is_archived) {}

  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj, _is_archived);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(DumpSharedSpaces, "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj, false);
  delete_seen_objects_table();

  // Note: we could also verify that all objects reachable from the archived
  // copy of orig_obj can only point to archived objects, with:
  //      init_seen_objects_table();
  //      verify_reachable_objects_from(archived_obj, true);
  //      delete_seen_objects_table();
  // but that's already done in G1HeapVerifier::verify_archive_regions so we
  // won't do it here.
}

void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  _num_total_verifications ++;
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);

    if (is_archived) {
      assert(is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) == NULL, "must be");
    } else {
      assert(!is_archived_object(obj), "must be");
      assert(find_archived_heap_object(obj) != NULL, "must be");
    }

    VerifySharedOopClosure walker(is_archived);
    obj->oop_iterate(&walker);
  }
}
#endif

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = NULL;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != NULL;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put(obj, true);
  ++ _num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_subgraph_info(k, is_full_module_graph);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}
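// Example of the log line produced by done_recording_subgraph() above
// (numbers are illustrative only):
//
//     [info][cds,heap] Done recording subgraph(s) for archived fields in
//         java/lang/Integer$IntegerCache: walked 1290 objs, archived 1288
//         new objs, recorded 2 classes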
class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields cannot be overloaded");
      assert(is_reference_type(fd->field_type()), "can archive only fields that are references");
      _found = true;
      _offset = fd->offset();
    }
  }
  bool found()     { return _found;  }
  int offset()     { return _offset; }
};

void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            int num, TRAPS) {
  for (int i = 0; i < num; i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);

    Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, CHECK);
    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(InstanceKlass::cast(ik)->is_shared_boot_class(),
           "Only support boot classes");
    ik->initialize(CHECK);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    assert(finder.found(), "field must exist");

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(TRAPS) {
  assert(is_heap_object_archiving_allowed(), "Sanity check");
  _dump_time_subgraph_info_table = new (ResourceObj::C_HEAP, mtClass)DumpTimeKlassSubGraphInfoTable();
  init_subgraph_entry_fields(closed_archive_subgraph_entry_fields,
                             num_closed_archive_subgraph_entry_fields,
                             CHECK);
  init_subgraph_entry_fields(open_archive_subgraph_entry_fields,
                             num_open_archive_subgraph_entry_fields,
                             CHECK);
  if (MetaspaceShared::use_full_module_graph()) {
    init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields,
                               num_fmg_open_archive_subgraph_entry_fields,
                               CHECK);
  }
}

void HeapShared::init_for_dumping(TRAPS) {
  if (is_heap_object_archiving_allowed()) {
    _dumped_interned_strings = new (ResourceObj::C_HEAP, mtClass)DumpedInternedStrings();
    init_subgraph_entry_fields(CHECK);
  }
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          int num, bool is_closed_archive,
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  int i;
  for (i = 0; i < num; ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; i < num; i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name,
                                                  is_closed_archive);
    }
    done_recording_subgraph(info->klass, klass_name);
  }
"closed" : "open", 1349 _num_total_subgraph_recordings); 1350 log_info(cds, heap)(" Walked %d objects", _num_total_walked_objs); 1351 log_info(cds, heap)(" Archived %d objects", _num_total_archived_objs); 1352 log_info(cds, heap)(" Recorded %d klasses", _num_total_recorded_klasses); 1353 1354 #ifndef PRODUCT 1355 for (int i = 0; i < num; i++) { 1356 ArchivableStaticFieldInfo* f = &fields[i]; 1357 verify_subgraph_from_static_field(f->klass, f->offset); 1358 } 1359 log_info(cds, heap)(" Verified %d references", _num_total_verifications); 1360 #endif 1361 } 1362 1363 // Not all the strings in the global StringTable are dumped into the archive, because 1364 // some of those strings may be only referenced by classes that are excluded from 1365 // the archive. We need to explicitly mark the strings that are: 1366 // [1] used by classes that WILL be archived; 1367 // [2] included in the SharedArchiveConfigFile. 1368 void HeapShared::add_to_dumped_interned_strings(oop string) { 1369 assert_at_safepoint(); // DumpedInternedStrings uses raw oops 1370 bool created; 1371 _dumped_interned_strings->put_if_absent(string, true, &created); 1372 } 1373 1374 // At dump-time, find the location of all the non-null oop pointers in an archived heap 1375 // region. This way we can quickly relocate all the pointers without using 1376 // BasicOopIterateClosure at runtime. 1377 class FindEmbeddedNonNullPointers: public BasicOopIterateClosure { 1378 narrowOop* _start; 1379 BitMap *_oopmap; 1380 int _num_total_oops; 1381 int _num_null_oops; 1382 public: 1383 FindEmbeddedNonNullPointers(narrowOop* start, BitMap* oopmap) 1384 : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {} 1385 1386 virtual void do_oop(narrowOop* p) { 1387 _num_total_oops ++; 1388 narrowOop v = *p; 1389 if (!CompressedOops::is_null(v)) { 1390 size_t idx = p - _start; 1391 _oopmap->set_bit(idx); 1392 } else { 1393 _num_null_oops ++; 1394 } 1395 } 1396 virtual void do_oop(oop *p) { 1397 ShouldNotReachHere(); 1398 } 1399 int num_total_oops() const { return _num_total_oops; } 1400 int num_null_oops() const { return _num_null_oops; } 1401 }; 1402 1403 ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) { 1404 assert(UseCompressedOops, "must be"); 1405 size_t num_bits = region.byte_size() / sizeof(narrowOop); 1406 ResourceBitMap oopmap(num_bits); 1407 1408 HeapWord* p = region.start(); 1409 HeapWord* end = region.end(); 1410 FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap); 1411 ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL; 1412 1413 int num_objs = 0; 1414 while (p < end) { 1415 oop o = cast_to_oop(p); 1416 o->oop_iterate(&finder); 1417 p += o->size(); 1418 if (DumpSharedSpaces) { 1419 builder->relocate_klass_ptr(o); 1420 } 1421 ++ num_objs; 1422 } 1423 1424 log_info(cds, heap)("calculate_oopmap: objects = %6d, embedded oops = %7d, nulls = %7d", 1425 num_objs, finder.num_total_oops(), finder.num_null_oops()); 1426 return oopmap; 1427 } 1428 1429 // Patch all the embedded oop pointers inside an archived heap region, 1430 // to be consistent with the runtime oop encoding. 
// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = HeapShared::decode_from_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

void HeapShared::patch_archived_heap_embedded_pointers(MemRegion region, address oopmap,
                                                       size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);

#ifndef PRODUCT
  ResourceMark rm;
  ResourceBitMap checkBm = calculate_oopmap(region);
  assert(bm.is_same(checkBm), "sanity");
#endif

  PatchEmbeddedPointers patcher((narrowOop*)region.start());
  bm.iterate(&patcher);
}

#endif // INCLUDE_CDS_JAVA_HEAP