/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotArtifactFinder.hpp"
#include "cds/aotClassInitializer.hpp"
#include "cds/aotClassLocation.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsAccess.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsEnumKlass.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderExt.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/fieldStreams.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

struct ArchivableStaticFieldInfo {
  const char* klass_name;
  const char* field_name;
  InstanceKlass* klass;
  int offset;
  BasicType type;

  ArchivableStaticFieldInfo(const char* k, const char* f)
  : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {}

  bool valid() {
    return klass_name != nullptr;
  }
};

class HeapShared::ContextMark : public StackObj {
  ResourceMark rm;
public:
  ContextMark(const char* c) : rm{} {
    _context->push(c);
  }
  ~ContextMark() {
    _context->pop();
  }
};

DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;

size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_total_obj_count;
size_t HeapShared::_total_obj_size;

#ifndef PRODUCT
#define ARCHIVE_TEST_FIELD_NAME "archivedObjects"
static Array<char>* _archived_ArchiveHeapTestClass = nullptr;
static const char* _test_class_name = nullptr;
static Klass* _test_class = nullptr;
static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
#endif


//
// If you add new entries to the following tables, you should know what you're doing!
//

static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  {"java/lang/Long$LongCache", "archivedCache"},
  {"java/lang/Byte$ByteCache", "archivedCache"},
  {"java/lang/Short$ShortCache", "archivedCache"},
  {"java/lang/Character$CharacterCache", "archivedCache"},
  {"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
  {"sun/util/locale/BaseLocale", "constantBaseLocales"},
  {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
  {"java/util/ImmutableCollections", "archivedObjects"},
  {"java/lang/ModuleLayer", "EMPTY_LAYER"},
  {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"},
  {"jdk/internal/math/FDBigInteger", "archivedCaches"},
  {"java/lang/reflect/Proxy$ProxyBuilder", "archivedData"}, // FIXME -- requires AOTClassLinking

#ifndef PRODUCT
  {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass
#endif
  {nullptr, nullptr},
};

// full module graph
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
  {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
  {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD},
  {"java/lang/Module$ArchivedData", "archivedData"},
  {nullptr, nullptr},
};

KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr;
GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_java_mirror_table = nullptr;
MetaspaceObjToOopHandleTable* HeapShared::_scratch_references_table = nullptr;

static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], InstanceKlass* ik) {
  for (int i = 0; fields[i].valid(); i++) {
    if (fields[i].klass == ik) {
      return true;
    }
  }
  return false;
}

bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
  return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
         is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
}

unsigned HeapShared::oop_hash(oop const& p) {
  // Do not call p->identity_hash() as that will update the
  // object header.
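  // (identity_hash() would install the hash bits into the object's markWord,
  // mutating the object as a side effect.)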
  return primitive_hash(cast_from_oop<intptr_t>(p));
}

static void reset_states(oop obj, TRAPS) {
  Handle h_obj(THREAD, obj);
  InstanceKlass* klass = InstanceKlass::cast(obj->klass());
  TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates");
  Symbol* method_sig = vmSymbols::void_method_signature();

  while (klass != nullptr) {
    Method* method = klass->find_method(method_name, method_sig);
    if (method != nullptr) {
      assert(method->is_private(), "must be");
      if (log_is_enabled(Debug, cds)) {
        ResourceMark rm(THREAD);
        log_debug(cds)("  calling %s", method->name_and_sig_as_C_string());
      }
      JavaValue result(T_VOID);
      JavaCalls::call_special(&result, h_obj, klass,
                              method_name, method_sig, CHECK);
    }
    klass = klass->java_super();
  }
}

void HeapShared::reset_archived_object_states(TRAPS) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  log_debug(cds)("Resetting platform loader");
  reset_states(SystemDictionary::java_platform_loader(), CHECK);
  log_debug(cds)("Resetting system loader");
  reset_states(SystemDictionary::java_system_loader(), CHECK);

  // Clean up jdk.internal.loader.ClassLoaders::bootLoader(), which is not
  // directly used for class loading, but rather is used by the core library
  // to keep track of resources, etc, loaded by the null class loader.
  //
  // Note, this object is non-null, and is not the same as
  // ClassLoaderData::the_null_class_loader_data()->class_loader(),
  // which is null.
  log_debug(cds)("Resetting boot loader");
  JavaValue result(T_OBJECT);
  JavaCalls::call_static(&result,
                         vmClasses::jdk_internal_loader_ClassLoaders_klass(),
                         vmSymbols::bootLoader_name(),
                         vmSymbols::void_BuiltinClassLoader_signature(),
                         CHECK);
  Handle boot_loader(THREAD, result.get_oop());
  reset_states(boot_loader(), CHECK);
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;

bool HeapShared::has_been_archived(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  return archived_object_cache()->get(obj) != nullptr;
}

int HeapShared::append_root(oop obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  if (obj != nullptr) {
    assert(has_been_archived(obj), "must be");
  }
  // No GC should happen since we aren't scanning _pending_roots.
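  // (Holding the raw oop "obj" across the append below is safe only because
  // we are in the VM thread, where no safepoint can intervene.)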
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  if (_pending_roots == nullptr) {
    _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
  }

  OopHandle oh(Universe::vm_global(), obj);
  return _pending_roots->append(oh);
}

objArrayOop HeapShared::root_segment(int segment_idx) {
  if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  } else {
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}

inline unsigned int oop_handle_hash(const OopHandle& oh) {
  oop o = oh.resolve();
  if (o == nullptr) {
    return 0;
  } else {
    return o->identity_hash();
  }
}

inline bool oop_handle_equals(const OopHandle& a, const OopHandle& b) {
  return a.resolve() == b.resolve();
}

class OrigToScratchObjectTable: public ResourceHashtable<OopHandle, OopHandle,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr;

void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
  if (_orig_to_scratch_object_table == nullptr) {
    _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable();
  }

  OopHandle orig_h(Universe::vm_global(), orig_obj);
  OopHandle scratch_h(Universe::vm_global(), scratch_obj);
  _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h);
}

oop HeapShared::orig_to_scratch_object(oop orig_obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);
  if (_orig_to_scratch_object_table != nullptr) {
    OopHandle orig(&orig_obj);
    OopHandle* v = _orig_to_scratch_object_table->get(orig);
    if (v != nullptr) {
      return v->resolve();
    }
  }
  return nullptr;
}

// Permanent oops are used to support AOT-compiled methods, which may have in-line references
// to Strings and MH oops.
//
// At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever)
// and are accessed via CDSAccess::get_archived_object(int).
struct PermanentOopInfo {
  int _index;       // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object
  int _heap_offset; // Offset of the object from the bottom of the archived heap.
  PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {}
};

class PermanentOopTable: public ResourceHashtable<OopHandle, PermanentOopInfo,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared,
    oop_handle_hash,
    oop_handle_equals> {};

static int _dumptime_permanent_oop_count = 0;
static PermanentOopTable* _dumptime_permanent_oop_table = nullptr;
static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr;

// ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table,
// so we can remember their offset (from the bottom of the archived heap).
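// The _index field of each entry starts out as -1 and is assigned lazily, the
// first time get_archived_object_permanent_index() is called on that object.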
void HeapShared::add_to_permanent_oop_table(oop obj, int offset) {
  assert_at_safepoint();
  if (_dumptime_permanent_oop_table == nullptr) {
    _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable();
  }

  PermanentOopInfo info(-1, offset);
  OopHandle oh(Universe::vm_global(), obj);
  _dumptime_permanent_oop_table->put_when_absent(oh, info);
}

// A permanent index is assigned to an archived object ONLY when
// the AOT compiler calls this function.
int HeapShared::get_archived_object_permanent_index(oop obj) {
  MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag);

  if (!CDSConfig::is_dumping_heap()) {
    return -1; // Called by the Leyden old workflow
  }
  if (_dumptime_permanent_oop_table == nullptr) {
    return -1;
  }

  if (_orig_to_scratch_object_table != nullptr) {
    OopHandle orig(&obj);
    OopHandle* v = _orig_to_scratch_object_table->get(orig);
    if (v != nullptr) {
      obj = v->resolve();
    }
  }

  OopHandle tmp(&obj);
  PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp);
  if (info == nullptr) {
    return -1;
  } else {
    if (info->_index < 0) {
      info->_index = _dumptime_permanent_oop_count++;
    }
    return info->_index;
  }
}

oop HeapShared::get_archived_object(int permanent_index) {
  assert(permanent_index >= 0, "sanity");
  assert(ArchiveHeapLoader::is_in_use(), "sanity");
  assert(_runtime_permanent_oops != nullptr, "sanity");

  return _runtime_permanent_oops->at(permanent_index).resolve();
}

// Remember all archived heap objects that have a permanent index.
//   table[i] = offset of oop whose permanent index is i.
void CachedCodeDirectoryInternal::dumptime_init_internal() {
  const int count = _dumptime_permanent_oop_count;
  if (count == 0) {
    // Avoid confusing CDS code with zero-sized tables, just return.
    log_info(cds)("No permanent oops");
    _permanent_oop_count = count;
    _permanent_oop_offsets = nullptr;
    return;
  }

  int* table = (int*)CDSAccess::allocate_from_code_cache(count * sizeof(int));
  for (int i = 0; i < count; i++) {
    table[i] = -1;
  }
  _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) {
    int index = info._index;
    if (index >= 0) {
      assert(index < count, "sanity");
      table[index] = info._heap_offset;
    }
    return true; // continue
  });

  for (int i = 0; i < count; i++) {
    assert(table[i] >= 0, "must be");
  }

  log_info(cds)("Dumped %d permanent oops", count);

  _permanent_oop_count = count;
  CDSAccess::set_pointer(&_permanent_oop_offsets, table);
}

// This is called during the bootstrap of the production run, before any GC can happen.
// Record each permanent oop in an OopHandle for GC safety.
void CachedCodeDirectoryInternal::runtime_init_internal() {
  int count = _permanent_oop_count;
  int* table = _permanent_oop_offsets;
  _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>();
  for (int i = 0; i < count; i++) {
    oop obj = ArchiveHeapLoader::oop_from_offset(table[i]);
    OopHandle oh(Universe::vm_global(), obj);
    _runtime_permanent_oops->append(oh);
  }
}

void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
  assert(_root_segment_max_size_elems > 0, "sanity");

  // Try to avoid divisions for the common case.
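  // E.g., with _root_segment_max_size_elems == 1000, idx == 2503 yields
  // seg_idx == 2 and int_idx == 503: root #2503 is element 503 of segment 2.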
  if (idx < _root_segment_max_size_elems) {
    seg_idx = 0;
    int_idx = idx;
  } else {
    seg_idx = idx / _root_segment_max_size_elems;
    int_idx = idx % _root_segment_max_size_elems;
  }

  assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
         "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}

// Returns the archived object root at the given index.
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
  assert(!_root_segments->is_empty(), "must have loaded shared heap");
  int seg_idx, int_idx;
  get_segment_indexes(index, seg_idx, int_idx);
  oop result = root_segment(seg_idx)->obj_at(int_idx);
  if (clear) {
    clear_root(index);
  }
  return result;
}

void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(CDSConfig::is_using_archive(), "must be");
  if (ArchiveHeapLoader::is_in_use()) {
    int seg_idx, int_idx;
    get_segment_indexes(index, seg_idx, int_idx);
    if (log_is_enabled(Debug, cds, heap)) {
      oop old = root_segment(seg_idx)->obj_at(int_idx);
      log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    root_segment(seg_idx)->obj_at_put(int_idx, nullptr);
  }
}

bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");
  if (has_been_archived(obj)) {
    return true;
  }

  if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: %zu",
                         p2i(obj), obj->size());
    debug_trace();
    return false;
  } else {
    count_allocation(obj->size());
    ArchiveHeapWriter::add_source_obj(obj);
    CachedOopInfo info = make_cached_oop_info(obj, referrer);
    archived_object_cache()->put_when_absent(obj, info);
    archived_object_cache()->maybe_grow();
    mark_native_pointers(obj);

    Klass* k = obj->klass();
    if (k->is_instance_klass()) {
      // Whenever we see a non-array Java object of type X, we mark X to be aot-initialized.
      // This ensures that during the production run, whenever Java code sees a cached object
      // of type X, we know that X is already initialized. (see TODO comment below ...)

      if (InstanceKlass::cast(k)->is_enum_subclass()
          // We can't rerun <clinit> of enum classes (see cdsEnumKlass.cpp) so
          // we must store them as AOT-initialized.
          || (subgraph_info == _dump_time_special_subgraph))
          // TODO: we do this only for the special subgraph for now. Extending this to
          // other subgraphs would require more refactoring of the core library (such as
          // moving some initialization logic into runtimeSetup()).
          //
          // For the other subgraphs, we have a weaker mechanism to ensure that
          // all classes in a subgraph are initialized before the subgraph is programmatically
          // returned from jdk.internal.misc.CDS::initializeFromArchive().
          // See HeapShared::initialize_from_archived_subgraph().
      {
        AOTArtifactFinder::add_aot_inited_class(InstanceKlass::cast(k));
      }

      if (java_lang_Class::is_instance(obj)) {
        Klass* mirror_k = java_lang_Class::as_Klass(obj);
        if (mirror_k != nullptr) {
          AOTArtifactFinder::add_cached_class(mirror_k);
        }
      }
    }

    if (log_is_enabled(Debug, cds, heap)) {
      ResourceMark rm;
      LogTarget(Debug, cds, heap) log;
      LogStream out(log);
      out.print("Archived heap object " PTR_FORMAT " : %s ",
                p2i(obj), obj->klass()->external_name());
      if (java_lang_Class::is_instance(obj)) {
        Klass* k = java_lang_Class::as_Klass(obj);
        if (k != nullptr) {
          out.print("%s", k->external_name());
        } else {
          out.print("primitive");
        }
      }
      out.cr();
    }

    return true;
  }
}

class MetaspaceObjToOopHandleTable: public ResourceHashtable<MetaspaceObj*, OopHandle,
    36137, // prime number
    AnyObj::C_HEAP,
    mtClassShared> {
public:
  oop get_oop(MetaspaceObj* ptr) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle* handle = get(ptr);
    if (handle != nullptr) {
      return handle->resolve();
    } else {
      return nullptr;
    }
  }
  void set_oop(MetaspaceObj* ptr, oop o) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle handle(Universe::vm_global(), o);
    bool is_new = put(ptr, handle);
    assert(is_new, "cannot set twice");
  }
  void remove_oop(MetaspaceObj* ptr) {
    MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag);
    OopHandle* handle = get(ptr);
    if (handle != nullptr) {
      handle->release(Universe::vm_global());
      remove(ptr);
    }
  }
};

void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  if (_scratch_references_table == nullptr) {
    _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
  }
  if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
    _scratch_references_table->set_oop(src, dest);
  }
}

objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_references_table->get_oop(src);
}

void HeapShared::init_dumping() {
  _scratch_java_mirror_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _scratch_references_table = new (mtClass)MetaspaceObjToOopHandleTable();
}

void HeapShared::init_scratch_objects(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
      track_scratch_object(Universe::java_mirror(bt), m);
    }
  }
}

// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that java_mirror itself will be returned if it's already a
// scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
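// (A scratch mirror is a dump-time copy of a Class object; its contents are
// what actually get written into the archived heap.)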
oop HeapShared::scratch_java_mirror(oop java_mirror) {
  assert(java_lang_Class::is_instance(java_mirror), "must be");

  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) {
        return java_mirror;
      }
    }
  }

  if (java_lang_Class::is_primitive(java_mirror)) {
    return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror));
  } else {
    return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror));
  }
}

oop HeapShared::scratch_java_mirror(BasicType t) {
  assert((uint)t < T_VOID+1, "range check");
  assert(!is_reference_type(t), "sanity");
  return _scratch_basic_type_mirrors[t].resolve();
}

oop HeapShared::scratch_java_mirror(Klass* k) {
  return _scratch_java_mirror_table->get_oop(k);
}

void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) {
  track_scratch_object(k->java_mirror(), mirror);
  _scratch_java_mirror_table->set_oop(k, mirror);
}

void HeapShared::remove_scratch_objects(Klass* k) {
  // Klass is being deallocated. Java mirror can still be alive, and it should not
  // point to dead klass. We need to break the link from mirror to the Klass.
  // See how InstanceKlass::deallocate_contents does it for normal mirrors.
  oop mirror = _scratch_java_mirror_table->get_oop(k);
  if (mirror != nullptr) {
    java_lang_Class::set_klass(mirror, nullptr);
  }
  _scratch_java_mirror_table->remove_oop(k);
  if (k->is_instance_klass()) {
    _scratch_references_table->remove(InstanceKlass::cast(k)->constants());
  }
  if (mirror != nullptr) {
    OopHandle tmp(&mirror);
    OopHandle* v = _orig_to_scratch_object_table->get(tmp);
    if (v != nullptr) {
      oop scratch_mirror = v->resolve();
      java_lang_Class::set_klass(scratch_mirror, nullptr);
      _orig_to_scratch_object_table->remove(tmp);
    }
  }
}

//TODO: we eventually want a more direct test for these kinds of things.
//For example the JVM could record some bit of context from the creation
//of the klass, such as who called the hidden class factory. Using
//string compares on names is fragile and will break as soon as somebody
//changes the names in the JDK code. See discussion in JDK-8342481 for
//related ideas about marking AOT-related classes.
bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) {
  return ik->is_hidden() &&
    (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") ||
     ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") ||
     ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") ||
     ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+"));
}

bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) {
  return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0);
}

bool HeapShared::is_string_concat_klass(InstanceKlass* ik) {
  return ik->is_hidden() && ik->name()->starts_with("java/lang/String$$StringConcat");
}

bool HeapShared::is_archivable_hidden_klass(InstanceKlass* ik) {
  return CDSConfig::is_dumping_invokedynamic() &&
    (is_lambda_form_klass(ik) || is_lambda_proxy_klass(ik) || is_string_concat_klass(ik));
}


void HeapShared::copy_and_rescan_aot_inited_mirror(InstanceKlass* ik) {
  ik->set_has_aot_initialized_mirror();
  if (AOTClassInitializer::is_runtime_setup_required(ik)) {
    ik->set_is_runtime_setup_required();
  }

  oop orig_mirror = ik->java_mirror();
  oop m = scratch_java_mirror(ik);
  assert(ik->is_initialized(), "must be");

  int nfields = 0;
  for (JavaFieldStream fs(ik); !fs.done(); fs.next()) {
    if (fs.access_flags().is_static()) {
      fieldDescriptor& fd = fs.field_descriptor();
      int offset = fd.offset();
      switch (fd.field_type()) {
      case T_OBJECT:
      case T_ARRAY:
        {
          oop field_obj = orig_mirror->obj_field(offset);
          if (offset == java_lang_Class::reflection_data_offset()) {
            // Class::reflectionData uses SoftReference, which cannot be archived. Set it
            // to null and it will be recreated at runtime.
            field_obj = nullptr;
          }
          m->obj_field_put(offset, field_obj);
          if (field_obj != nullptr) {
            bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, field_obj);
            assert(success, "sanity");
          }
        }
        break;
      case T_BOOLEAN:
        m->bool_field_put(offset, orig_mirror->bool_field(offset));
        break;
      case T_BYTE:
        m->byte_field_put(offset, orig_mirror->byte_field(offset));
        break;
      case T_SHORT:
        m->short_field_put(offset, orig_mirror->short_field(offset));
        break;
      case T_CHAR:
        m->char_field_put(offset, orig_mirror->char_field(offset));
        break;
      case T_INT:
        m->int_field_put(offset, orig_mirror->int_field(offset));
        break;
      case T_LONG:
        m->long_field_put(offset, orig_mirror->long_field(offset));
        break;
      case T_FLOAT:
        m->float_field_put(offset, orig_mirror->float_field(offset));
        break;
      case T_DOUBLE:
        m->double_field_put(offset, orig_mirror->double_field(offset));
        break;
      default:
        ShouldNotReachHere();
      }
      nfields++;
    }
  }

  oop class_data = java_lang_Class::class_data(orig_mirror);
  java_lang_Class::set_class_data(m, class_data);
  if (class_data != nullptr) {
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, class_data);
    assert(success, "sanity");
  }

  if (log_is_enabled(Info, cds, init)) {
    ResourceMark rm;
    log_info(cds, init)("copied %3d field(s) in aot-initialized mirror %s%s%s", nfields, ik->external_name(),
                        ik->is_hidden() ? " (hidden)" : "",
                        ik->is_enum_subclass() ?
                        " (enum)" : "");
  }
}

static void copy_java_mirror_hashcode(oop orig_mirror, oop scratch_m) {
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!orig_mirror->fast_no_hash_check()) {
    intptr_t src_hash = orig_mirror->identity_hash();
    if (UseCompactObjectHeaders) {
      narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass());
      scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(scratch_m->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
}

static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
  if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
    objArrayOop rr = src_ik->constants()->resolved_references_or_null();
    if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
      return HeapShared::scratch_resolved_references(src_ik->constants());
    }
  }
  return nullptr;
}

void HeapShared::archive_strings() {
  oop shared_strings_array = StringTable::init_shared_strings_array(_dumped_interned_strings);
  bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
  // We must succeed because:
  // - _dumped_interned_strings does not contain any large strings.
  // - StringTable::init_shared_table() doesn't create any large arrays.
  assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
  StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
}

int HeapShared::archive_exception_instance(oop exception) {
  bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
  assert(success, "sanity");
  return append_root(exception);
}

void HeapShared::mark_native_pointers(oop orig_obj) {
  if (java_lang_Class::is_instance(orig_obj)) {
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
  } else if (java_lang_invoke_ResolvedMethodName::is_instance(orig_obj)) {
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_invoke_ResolvedMethodName::vmtarget_offset());
  }
}

void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  has_oop_pointers = info->has_oop_pointers();
  has_native_pointers = info->has_native_pointers();
}

void HeapShared::set_has_native_pointers(oop src_obj) {
  CachedOopInfo* info = archived_object_cache()->get(src_obj);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}

// Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
// should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
void HeapShared::start_scanning_for_oops() {
  {
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
    _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (UseCompressedOops || UseG1GC) {
      log_info(cds)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }

    archive_subgraphs();
  }

  init_seen_objects_table();
  Universe::archive_exception_instances();
}

void HeapShared::end_scanning_for_oops() {
  archive_strings();
  delete_seen_objects_table();
}

void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
  {
    NoSafepointVerifier nsv;
    if (!SkipArchiveHeapVerification) {
      CDSHeapVerifier::verify();
    }
    check_special_subgraph_classes();
  }

  StringTable::write_shared_table(_dumped_interned_strings);
  GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
  for (int i = 0; i < _pending_roots->length(); i++) {
    roots->append(_pending_roots->at(i).resolve());
  }
  ArchiveHeapWriter::write(roots, heap_info);
  delete roots;

  ArchiveBuilder::OtherROAllocMark mark;
  write_subgraph_info_table();
}

void HeapShared::scan_java_mirror(oop orig_mirror) {
  oop m = scratch_java_mirror(orig_mirror);
  if (m != nullptr) { // nullptr for classes loaded by custom class loaders
    copy_java_mirror_hashcode(orig_mirror, m);
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
    assert(success, "sanity");
  }
}

void HeapShared::scan_java_class(Klass* orig_k) {
  scan_java_mirror(orig_k->java_mirror());

  if (orig_k->is_instance_klass()) {
    InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
    orig_ik->constants()->prepare_resolved_references_for_archiving();
    objArrayOop rr = get_archived_resolved_references(orig_ik);
    if (rr != nullptr) {
      bool success = HeapShared::archive_reachable_objects_from(1, _dump_time_special_subgraph, rr);
      assert(success, "must be");
    }

    orig_ik->constants()->add_dumped_interned_strings();
  }
}

void HeapShared::archive_subgraphs() {
  assert(CDSConfig::is_dumping_heap(), "must be");

  archive_object_subgraphs(archive_subgraph_entry_fields,
                           false /* is_full_module_graph */);

  if (CDSConfig::is_dumping_full_module_graph()) {
    archive_object_subgraphs(fmg_archive_subgraph_entry_fields,
                             true /* is_full_module_graph */);
    Modules::verify_archived_modules();
  }
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = nullptr;
HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the "buffered"
// address of the class.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  bool created;
  KlassSubGraphInfo* info =
    _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(k, is_full_module_graph),
                                                  &created);
  assert(created, "must not initialize twice");
  return info;
}

KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
  assert(info != nullptr, "must have been initialized");
  return info;
}

// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  if (_subgraph_entry_fields == nullptr) {
    _subgraph_entry_fields =
      new (mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");

  if (_subgraph_object_klasses == nullptr) {
    _subgraph_object_klasses =
      new (mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  if (_k == orig_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (orig_k->is_instance_klass()) {
#ifdef ASSERT
    InstanceKlass* ik = InstanceKlass::cast(orig_k);
    if (CDSConfig::is_dumping_invokedynamic()) {
      assert(ik->class_loader() == nullptr ||
             HeapShared::is_lambda_proxy_klass(ik),
             "we can archive only instances of boot classes or lambda proxy classes");
    } else {
      assert(ik->class_loader() == nullptr, "must be boot class");
    }
#endif
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
    check_allowed_klass(InstanceKlass::cast(orig_k));
  } else if (orig_k->is_objArray_klass()) {
    Klass* abk = ObjArrayKlass::cast(orig_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->is_shared_boot_class(),
             "must be boot class");
      check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
    }
    if (orig_k == Universe::objectArrayKlass()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(orig_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, cds, heap)) {
    if (!_subgraph_object_klasses->contains(orig_k)) {
      ResourceMark rm;
      log_debug(cds, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(orig_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}

void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
  if (CDSConfig::is_dumping_invokedynamic()) {
    // FIXME -- this allows LambdaProxy classes
    return;
  }
  if (ik->module()->name() == vmSymbols::java_base()) {
    assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
    return;
  }

  const char* lambda_msg = "";
  if (CDSConfig::is_dumping_invokedynamic()) {
    lambda_msg = ", or a lambda proxy class";
    if (HeapShared::is_lambda_proxy_klass(ik) &&
        (ik->class_loader() == nullptr ||
         ik->class_loader() == SystemDictionary::java_platform_loader() ||
         ik->class_loader() == SystemDictionary::java_system_loader())) {
      return;
    }
  }

#ifndef PRODUCT
  if (!ik->module()->is_named() && ik->package() == nullptr && ArchiveHeapTestClass != nullptr) {
    // This class is loaded by ArchiveHeapTestClass
    return;
  }
  const char* testcls_msg = ", or a test class in an unnamed package of an unnamed module";
#else
  const char* testcls_msg = "";
#endif

  ResourceMark rm;
  log_error(cds, heap)("Class %s not allowed in archive heap. Must be in java.base%s%s",
                       ik->external_name(), lambda_msg, testcls_msg);
  MetaspaceShared::unrecoverable_writing_error();
}

bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
  if (k->is_objArray_klass()) {
    k = ObjArrayKlass::cast(k)->bottom_klass();
  }
  if (k->is_instance_klass()) {
    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
      ResourceMark rm;
      log_info(cds, heap)("non-early: %s", k->external_name());
      return true;
    } else {
      return false;
    }
  } else {
    return false;
  }
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
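// This runs while the record is being copied into the RO region of the archive;
// see HeapShared::archive_subgraph_info() and write_subgraph_info_table().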
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = ArchiveBuilder::get_buffered_klass(info->klass());
  _entry_field_records = nullptr;
  _subgraph_object_klasses = nullptr;
  _is_full_module_graph = info->is_full_module_graph();

  if (_is_full_module_graph) {
    // Consider all classes referenced by the full module graph as early -- we will be
    // allocating objects of these classes during JVMTI early phase, so they cannot
    // be processed by (non-early) JVMTI ClassFileLoadHook
    _has_non_early_klasses = false;
  } else {
    _has_non_early_klasses = info->has_non_early_klasses();
  }

  if (_has_non_early_klasses) {
    ResourceMark rm;
    log_info(cds, heap)(
      "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
      _k->external_name());
  }

  // populate the entry fields
  GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != nullptr) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      ArchiveBuilder::new_ro_array<int>(num_entry_fields);
    for (int i = 0; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // <recorded_klasses> has the Klasses of all the objects that are referenced by this subgraph.
  // Copy those that need to be explicitly initialized into <_subgraph_object_klasses>.
  GrowableArray<Klass*>* recorded_klasses = info->subgraph_object_klasses();
  if (recorded_klasses != nullptr) {
    // AOT-inited classes are automatically marked as "initialized" during bootstrap. When
    // programmatically loading a subgraph, we only need to explicitly initialize the classes
    // that are not aot-inited.
    int num_to_copy = 0;
    for (int i = 0; i < recorded_klasses->length(); i++) {
      Klass* subgraph_k = ArchiveBuilder::get_buffered_klass(recorded_klasses->at(i));
      if (!subgraph_k->has_aot_initialized_mirror()) {
        num_to_copy++;
      }
    }

    _subgraph_object_klasses = ArchiveBuilder::new_ro_array<Klass*>(num_to_copy);
    bool is_special = (_k == ArchiveBuilder::get_buffered_klass(vmClasses::Object_klass()));
    for (int i = 0, n = 0; i < recorded_klasses->length(); i++) {
      Klass* subgraph_k = ArchiveBuilder::get_buffered_klass(recorded_klasses->at(i));
      if (subgraph_k->has_aot_initialized_mirror()) {
        continue;
      }
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm;
        const char* owner_name = is_special ?
"<special>" : _k->external_name(); 1150 if (subgraph_k->is_instance_klass()) { 1151 InstanceKlass* src_ik = InstanceKlass::cast(ArchiveBuilder::current()->get_source_addr(subgraph_k)); 1152 } 1153 log_info(cds, heap)( 1154 "Archived object klass %s (%2d) => %s", 1155 owner_name, n, subgraph_k->external_name()); 1156 } 1157 _subgraph_object_klasses->at_put(n, subgraph_k); 1158 ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(n)); 1159 n++; 1160 } 1161 } 1162 1163 ArchivePtrMarker::mark_pointer(&_k); 1164 ArchivePtrMarker::mark_pointer(&_entry_field_records); 1165 ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses); 1166 } 1167 1168 class HeapShared::CopyKlassSubGraphInfoToArchive : StackObj { 1169 CompactHashtableWriter* _writer; 1170 public: 1171 CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {} 1172 1173 bool do_entry(Klass* klass, KlassSubGraphInfo& info) { 1174 if (info.subgraph_object_klasses() != nullptr || info.subgraph_entry_fields() != nullptr) { 1175 ArchivedKlassSubGraphInfoRecord* record = HeapShared::archive_subgraph_info(&info); 1176 Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass); 1177 unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k); 1178 u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record); 1179 _writer->add(hash, delta); 1180 } 1181 return true; // keep on iterating 1182 } 1183 }; 1184 1185 ArchivedKlassSubGraphInfoRecord* HeapShared::archive_subgraph_info(KlassSubGraphInfo* info) { 1186 ArchivedKlassSubGraphInfoRecord* record = 1187 (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord)); 1188 record->init(info); 1189 if (info == _dump_time_special_subgraph) { 1190 _run_time_special_subgraph = record; 1191 } 1192 return record; 1193 } 1194 1195 // Build the records of archived subgraph infos, which include: 1196 // - Entry points to all subgraphs from the containing class mirror. The entry 1197 // points are static fields in the mirror. For each entry point, the field 1198 // offset, and value are recorded in the sub-graph 1199 // info. The value is stored back to the corresponding field at runtime. 1200 // - A list of klasses that need to be loaded/initialized before archived 1201 // java object sub-graph can be accessed at runtime. 1202 void HeapShared::write_subgraph_info_table() { 1203 // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive. 
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);
  writer.dump(&_run_time_subgraph_info_table, "subgraphs");

#ifndef PRODUCT
  if (ArchiveHeapTestClass != nullptr) {
    size_t len = strlen(ArchiveHeapTestClass) + 1;
    Array<char>* array = ArchiveBuilder::new_ro_array<char>((int)len);
    strncpy(array->adr_at(0), ArchiveHeapTestClass, len);
    _archived_ArchiveHeapTestClass = array;
  }
#endif
  if (log_is_enabled(Info, cds, heap)) {
    print_stats();
  }
}

void HeapShared::add_root_segment(objArrayOop segment_oop) {
  assert(segment_oop != nullptr, "must be");
  assert(ArchiveHeapLoader::is_in_use(), "must be");
  if (_root_segments == nullptr) {
    _root_segments = new GrowableArrayCHeap<OopHandle, mtClassShared>(10);
  }
  _root_segments->push(OopHandle(Universe::vm_global(), segment_oop));
}

void HeapShared::init_root_segment_sizes(int max_size_elems) {
  _root_segment_max_size_elems = max_size_elems;
}

void HeapShared::serialize_tables(SerializeClosure* soc) {

#ifndef PRODUCT
  soc->do_ptr(&_archived_ArchiveHeapTestClass);
  if (soc->reading() && _archived_ArchiveHeapTestClass != nullptr) {
    _test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
    setup_test_class(_test_class_name);
  }
#endif

  _run_time_subgraph_info_table.serialize_header(soc);
  soc->do_ptr(&_run_time_special_subgraph);
}

static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(cds, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 forces a GC to happen in such an early stage
      // to check for GC safety.
      log_info(cds, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC, true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if an ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }

  if (!CDSConfig::is_using_aot_linked_classes()) {
    assert(_run_time_special_subgraph != nullptr, "must be");
    Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
    if (klasses != nullptr) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        ExceptionMark em(current); // no exception can happen here
        resolve_or_init(k, /*do_init*/false, current);
      }
    }
  }

  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}

void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != nullptr && k->is_shared_boot_class(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}

void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == nullptr) {
    clear_archived_roots_of(k);
  }
}

void HeapShared::initialize_java_lang_invoke(TRAPS) {
  if (CDSConfig::is_loading_invokedynamic() || CDSConfig::is_dumping_invokedynamic()) {
    resolve_or_init("java/lang/invoke/Invokers$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/MethodHandle", true, CHECK);
    resolve_or_init("java/lang/invoke/MethodHandleNatives", true, CHECK);
    resolve_or_init("java/lang/invoke/DirectMethodHandle$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/DelegatingMethodHandle$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/LambdaForm$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/BoundMethodHandle$Species_L", true, CHECK);
  }
}

// Initialize the InstanceKlasses of objects that are reachable from the following roots:
//   - interned strings
//   - Klass::java_mirror() -- including aot-initialized mirrors such as those of Enum klasses.
//   - ConstantPool::resolved_references()
//   - Universe::<xxx>_exception_instance()
//
// For example, if this enum class is initialized at AOT cache assembly time:
//
//   enum Fruit {
//     APPLE, ORANGE, BANANA;
//     static final Set<Fruit> HAVE_SEEDS = new HashSet<>(Arrays.asList(APPLE, ORANGE));
//   }
//
// the aot-initialized mirror of Fruit has a static field that references HashSet, which
// should be initialized before any Java code can access the Fruit class. Note that
// HashSet itself doesn't necessarily need to be an aot-initialized class.
void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
  if (!ArchiveHeapLoader::is_in_use()) {
    return;
  }

  assert(_run_time_special_subgraph != nullptr, "must be");
  Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
  if (klasses != nullptr) {
    for (int pass = 0; pass < 2; pass++) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        if (k->class_loader_data() == nullptr) {
          // This class is not yet loaded. We will initialize it in a later phase.
          // For example, we have loaded only AOTLinkedClassCategory::BOOT1 classes
          // but k is part of AOTLinkedClassCategory::BOOT2.
          continue;
        }
        if (k->class_loader() == class_loader()) {
          if (pass == 0) {
            if (k->is_instance_klass()) {
              InstanceKlass::cast(k)->link_class(CHECK);
            }
          } else {
            resolve_or_init(k, /*do_init*/true, CHECK);
          }
        }
      }
    }
  }
}

void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }

  if (k->name()->equals("jdk/internal/module/ArchivedModuleGraph") &&
      !CDSConfig::is_using_optimized_module_handling() &&
      // archive was created with --module-path
      AOTClassLocationConfig::runtime()->num_module_paths() > 0) {
    // ArchivedModuleGraph was created with a --module-path that's different than the runtime --module-path.
    // Thus, it might contain references to modules that do not exist at runtime. We cannot use it.
    log_info(cds, heap)("Skip initializing ArchivedModuleGraph subgraph: is_using_optimized_module_handling=%s num_module_paths=%d",
                        BOOL_TO_STR(CDSConfig::is_using_optimized_module_handling()),
                        AOTClassLocationConfig::runtime()->num_module_paths());
    return;
  }

  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);

  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // None of the field values will be set if there was an exception when initializing the classes.
    // The java code will not see any of the archived objects in the
    // subgraphs referenced from k in this case.
    return;
  }

  if (record != nullptr) {
    init_archived_fields_for(k, record);
  }
}

const ArchivedKlassSubGraphInfoRecord*
HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
  assert(!CDSConfig::is_dumping_heap(), "Should not be called when dumping heap");

  if (!k->is_shared()) {
    return nullptr;
  }
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

#ifndef PRODUCT
  if (_test_class_name != nullptr && k->name()->equals(_test_class_name) && record != nullptr) {
    _test_class = k;
    _test_class_record = record;
  }
#endif

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record == nullptr) {
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm(THREAD);
      log_info(cds, heap)("subgraph %s is not recorded",
                          k->external_name());
    }
    return nullptr;
  } else {
    if (record->is_full_module_graph() && !CDSConfig::is_using_full_module_graph()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because full module graph is disabled",
                            k->external_name());
      }
      return nullptr;
    }

    if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
      if (log_is_enabled(Info, cds, heap)) {
        ResourceMark rm(THREAD);
        log_info(cds, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
                            k->external_name());
      }
      return nullptr;
    }

    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("%s subgraph %s ", do_init ? "init" : "resolve", k->external_name());
    }

    resolve_or_init(k, do_init, CHECK_NULL);

    // Load/link/initialize the klasses of the objects in the subgraph.
    // nullptr class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != nullptr) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* klass = klasses->at(i);
        if (!klass->is_shared()) {
          return nullptr;
        }
        resolve_or_init(klass, do_init, CHECK_NULL);
      }
    }
  }

  return record;
}

void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
  TempNewSymbol klass_name_sym = SymbolTable::new_symbol(klass_name);
  InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
  if (k == nullptr) {
    return;
  }
  assert(k->is_shared_boot_class(), "sanity");
  resolve_or_init(k, false, CHECK);
  if (do_init) {
    resolve_or_init(k, true, CHECK);
  }
}

void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    if (k->class_loader_data() == nullptr) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}

void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
  verify_the_heap(k, "before");

  // Load the subgraph entry fields from the record and store them back to
  // the corresponding fields within the mirror.
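  // (entry_field_records is a flattened array of <field_offset, root_index>
  // pairs; see KlassSubGraphInfo::add_subgraph_entry_field().)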
  oop m = k->java_mirror();
  Array<int>* entry_field_records = record->entry_field_records();
  if (entry_field_records != nullptr) {
    int efr_len = entry_field_records->length();
    assert(efr_len % 2 == 0, "sanity");
    for (int i = 0; i < efr_len; i += 2) {
      int field_offset = entry_field_records->at(i);
      int root_index = entry_field_records->at(i+1);
      oop v = get_root(root_index, /*clear=*/true);
      if (k->has_aot_initialized_mirror()) {
        assert(v == m->obj_field(field_offset), "must be aot-initialized");
      } else {
        m->obj_field_put(field_offset, v);
      }
      log_debug(cds, heap)(" " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
    }

    // Done. Java code can see the archived sub-graphs referenced from k's
    // mirror after this point.
    if (log_is_enabled(Info, cds, heap)) {
      ResourceMark rm;
      log_info(cds, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s%s",
                          k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "",
                          k->has_aot_initialized_mirror() ? " (aot-inited)" : "");
    }
  }

  verify_the_heap(k, "after ");
}

void HeapShared::clear_archived_roots_of(Klass* k) {
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
  if (record != nullptr) {
    Array<int>* entry_field_records = record->entry_field_records();
    if (entry_field_records != nullptr) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 2 == 0, "sanity");
      for (int i = 0; i < efr_len; i += 2) {
        int root_index = entry_field_records->at(i+1);
        clear_root(root_index);
      }
    }
  }
}

// Push all oops that are referenced by _referencing_obj onto the _stack.
class HeapShared::ReferentPusher: public BasicOopIterateClosure {
  PendingOopStack* _stack;
  GrowableArray<oop> _found_oop_fields;
  int _level;
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _referencing_obj;
 public:
  ReferentPusher(PendingOopStack* stack,
                 int level,
                 bool record_klasses_only,
                 KlassSubGraphInfo* subgraph_info,
                 oop orig) :
    _stack(stack),
    _found_oop_fields(),
    _level(level),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _referencing_obj(orig) {
  }
  void do_oop(narrowOop *p) { ReferentPusher::do_oop_work(p); }
  void do_oop(      oop *p) { ReferentPusher::do_oop_work(p); }

  ~ReferentPusher() {
    while (_found_oop_fields.length() > 0) {
      // This produces the exact same traversal order as the previous version
      // of ReferentPusher that recurses on the C stack -- a depth-first search,
      // walking the oop fields in _referencing_obj by ascending field offsets.
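      // (Both _found_oop_fields and the pending-oop stack are drained LIFO,
      // so the two reversals cancel out: the walk loop ends up popping these
      // referents in ascending-field-offset order, as stated above.)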
      oop obj = _found_oop_fields.pop();
      _stack->push(PendingOop(obj, _referencing_obj, _level + 1));
    }
  }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      size_t field_delta = pointer_delta(p, _referencing_obj, sizeof(char));

      if (!_record_klasses_only && log_is_enabled(Debug, cds, heap)) {
        ResourceMark rm;
        log_debug(cds, heap)("(%d) %s[%zu] ==> " PTR_FORMAT " size %zu %s", _level,
                             _referencing_obj->klass()->external_name(), field_delta,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        if (log_is_enabled(Trace, cds, heap)) {
          LogTarget(Trace, cds, heap) log;
          LogStream out(log);
          obj->print_on(&out);
        }
      }

      _found_oop_fields.push(obj);
    }
  }

 public:
  oop referencing_obj() { return _referencing_obj; }
  KlassSubGraphInfo* subgraph_info() { return _subgraph_info; }
};


// Checks if an oop has any non-null oop fields
class PointsToOopsChecker : public BasicOopIterateClosure {
  bool _result;

  template <class T> void check(T *p) {
    _result |= (HeapAccess<>::oop_load(p) != nullptr);
  }

 public:
  PointsToOopsChecker() : _result(false) {}
  void do_oop(narrowOop *p) { check(p); }
  void do_oop(      oop *p) { check(p); }
  bool result() { return _result; }
};

HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(referrer, points_to_oops_checker.result());
}

void HeapShared::init_box_classes(TRAPS) {
  if (ArchiveHeapLoader::is_in_use()) {
    vmClasses::Boolean_klass()->initialize(CHECK);
    vmClasses::Character_klass()->initialize(CHECK);
    vmClasses::Float_klass()->initialize(CHECK);
    vmClasses::Double_klass()->initialize(CHECK);
    vmClasses::Byte_klass()->initialize(CHECK);
    vmClasses::Short_klass()->initialize(CHECK);
    vmClasses::Integer_klass()->initialize(CHECK);
    vmClasses::Long_klass()->initialize(CHECK);
    vmClasses::Void_klass()->initialize(CHECK);
  }
}

void HeapShared::exit_on_error() {
  if (_context != nullptr) {
    ResourceMark rm;
    LogStream ls(Log(cds, heap)::error());
    ls.print_cr("Context");
    for (int i = 0; i < _context->length(); i++) {
      const char* s = _context->at(i);
      ls.print_cr("- %s", s);
    }
  }
  debug_trace();
  MetaspaceShared::unrecoverable_writing_error();
}

// (1) If orig_obj has not been archived yet, archive it.
// (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called),
//     trace all objects that are reachable from it, and make sure these objects are archived.
// (3) Record the klasses of all objects that are reachable from orig_obj (including those that
//     were already archived when this function is called)
bool HeapShared::archive_reachable_objects_from(int level,
                                                KlassSubGraphInfo* subgraph_info,
                                                oop orig_obj) {
  PendingOopStack stack;
  stack.push(PendingOop(orig_obj, nullptr, level));

  while (stack.length() > 0) {
    PendingOop po = stack.pop();
    _object_being_archived = po;
    bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer());
    _object_being_archived = PendingOop();

    if (!status) {
      // Don't archive a subgraph root that's too big. For archived static fields, that's OK
      // as the Java code will take care of initializing this field dynamically.
      assert(level == 1, "VM should have exited with unarchivable objects for _level > 1");
      return false;
    }
  }

  return true;
}

bool HeapShared::walk_one_object(PendingOopStack* stack, int level, KlassSubGraphInfo* subgraph_info,
                                 oop orig_obj, oop referrer) {
  assert(orig_obj != nullptr, "must be");
  if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
    // This object has injected fields that cannot be supported easily, so we disallow them for now.
    // If you get an error here, you probably made a change in the JDK library that has added
    // these objects that are referenced (directly or indirectly) by static fields.
    ResourceMark rm;
    log_error(cds, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name());
    exit_on_error();
  }

  if (log_is_enabled(Debug, cds, heap) && java_lang_Class::is_instance(orig_obj)) {
    ResourceMark rm;
    LogTarget(Debug, cds, heap) log;
    LogStream out(log);
    out.print("Found java mirror " PTR_FORMAT " ", p2i(orig_obj));
    Klass* k = java_lang_Class::as_Klass(orig_obj);
    if (k != nullptr) {
      out.print("%s", k->external_name());
    } else {
      out.print("primitive");
    }
    out.print_cr("; scratch mirror = " PTR_FORMAT,
                 p2i(scratch_java_mirror(orig_obj)));
  }

  if (CDSConfig::is_initing_classes_at_dump_time()) {
    if (java_lang_Class::is_instance(orig_obj)) {
      orig_obj = scratch_java_mirror(orig_obj);
      assert(orig_obj != nullptr, "must be archived");
    }
  } else if (java_lang_Class::is_instance(orig_obj) && subgraph_info != _dump_time_special_subgraph) {
    // Without CDSConfig::is_initing_classes_at_dump_time(), we only allow archived objects to
    // point to the mirrors of (1) j.l.Object, (2) primitive classes, and (3) box classes. These are initialized
    // very early by HeapShared::init_box_classes().
    if (orig_obj == vmClasses::Object_klass()->java_mirror()
        || java_lang_Class::is_primitive(orig_obj)
        || orig_obj == vmClasses::Boolean_klass()->java_mirror()
        || orig_obj == vmClasses::Character_klass()->java_mirror()
        || orig_obj == vmClasses::Float_klass()->java_mirror()
        || orig_obj == vmClasses::Double_klass()->java_mirror()
        || orig_obj == vmClasses::Byte_klass()->java_mirror()
        || orig_obj == vmClasses::Short_klass()->java_mirror()
        || orig_obj == vmClasses::Integer_klass()->java_mirror()
        || orig_obj == vmClasses::Long_klass()->java_mirror()
        || orig_obj == vmClasses::Void_klass()->java_mirror()) {
      orig_obj = scratch_java_mirror(orig_obj);
      assert(orig_obj != nullptr, "must be archived");
    } else {
      // If you get an error here, you probably made a change in the JDK library that has added a Class
      // object that is referenced (directly or indirectly) by an ArchivableStaticFieldInfo
      // defined at the top of this file.
      log_error(cds, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level);
      debug_trace();
      MetaspaceShared::unrecoverable_writing_error();
    }
  }

  if (has_been_seen_during_subgraph_recording(orig_obj)) {
    // orig_obj has already been archived and traced. Nothing more to do.
    return true;
  } else {
    set_has_been_seen_during_subgraph_recording(orig_obj);
  }

  bool already_archived = has_been_archived(orig_obj);
  bool record_klasses_only = already_archived;
  if (!already_archived) {
    ++_num_new_archived_objs;
    if (!archive_object(orig_obj, referrer, subgraph_info)) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;
      log_error(cds, heap)(
        "Cannot archive the sub-graph referenced from %s object ("
        PTR_FORMAT ") size %zu, skipped.",
        orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize);
      if (level == 1) {
        // Don't archive a subgraph root that's too big. For archived static fields, that's OK
        // as the Java code will take care of initializing this field dynamically.
        return false;
      } else {
        // We don't know how to handle an object that has been archived, but some of its reachable
        // objects cannot be archived. Bail out for now. We might need to fix this in the future if
        // we have a real use case.
        exit_on_error();
      }
    }
  }

  Klass* orig_k = orig_obj->klass();
  subgraph_info->add_subgraph_object_klass(orig_k);

  {
    // Find all the oops that are referenced by orig_obj, push them onto the stack
    // so we can work on them next.
    ResourceMark rm;
    ReferentPusher pusher(stack, level, record_klasses_only, subgraph_info, orig_obj);
    orig_obj->oop_iterate(&pusher);
  }

  if (CDSConfig::is_initing_classes_at_dump_time()) {
    // The enum klasses are archived with aot-initialized mirror.
    // See AOTClassInitializer::can_archive_initialized_mirror().
  } else {
    if (CDSEnumKlass::is_enum_obj(orig_obj)) {
      CDSEnumKlass::handle_enum_obj(level + 1, subgraph_info, orig_obj);
    }
  }

  return true;
}

//
// Start from the given static field in a java mirror and archive the
// complete sub-graph of java heap objects that are reached directly
// or indirectly from the starting object by following references.
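//
// For example (illustrative): given a static field X::f whose value is an
// object A that references B and C,
//
//   X.java_mirror()  --f-->  A --> B
//                             \--> C
//
// archiving from X::f copies A, B and C into the archive and records their
// klasses, so they can be loaded and initialized before the sub-graph is
// made visible at runtime.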
// Sub-graph archiving restrictions (current):
//
// - All classes of objects in the archived sub-graph (including the
//   entry class) must be boot classes only.
// - No java.lang.Class instance (java mirror) can be included inside
//   an archived sub-graph. A mirror can only be the sub-graph entry object.
//
// The Java heap object sub-graph archiving process (see ReferentPusher):
//
// 1) Java object sub-graph archiving starts from a given static field
//    within a Class instance (java mirror). If the static field is a
//    reference field and points to a non-null java object, proceed to
//    the next step.
//
// 2) Archives the referenced java object. If an archived copy of the
//    current object already exists, updates the pointer in the archived
//    copy of the referencing object to point to the current archived object.
//    Otherwise, proceed to the next step.
//
// 3) Follows all references within the current java object and recursively
//    archives the sub-graph of objects starting from each reference.
//
// 4) Updates the pointer in the archived copy of the referencing object to
//    point to the current archived object.
//
// 5) The Klass of the current java object is added to the list of Klasses
//    for loading and initializing before any object in the archived graph can
//    be accessed at runtime.
//
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass* k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(cds, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, cds, heap)) {
      LogTarget(Trace, cds, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    bool success = archive_reachable_objects_from(1, subgraph_info, f);
    if (!success) {
      log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, f);
      log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(f));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, nullptr);
  }
}

#ifndef PRODUCT
class VerifySharedOopClosure: public BasicOopIterateClosure {
 public:
  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj);
    }
  }
};

void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
  if (!has_been_archived(orig_obj)) {
    // It's OK if the root of a subgraph is not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj);
  delete_seen_objects_table();
}

void HeapShared::verify_reachable_objects_from(oop obj) {
  _num_total_verifications++;
  if (java_lang_Class::is_instance(obj)) {
    obj = scratch_java_mirror(obj);
    assert(obj != nullptr, "must be");
  }
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);
    assert(has_been_archived(obj), "must be");
    VerifySharedOopClosure walker;
    obj->oop_iterate(&walker);
  }
}
#endif

void HeapShared::check_special_subgraph_classes() {
  if (CDSConfig::is_initing_classes_at_dump_time()) {
    // We can have aot-initialized classes (such as Enums) that can reference objects
    // of arbitrary types. Currently, we trust the JEP 483 implementation to only
    // aot-initialize classes that are "safe".
    //
    // TODO: we need an automatic tool that checks the safety of aot-initialized
    // classes (when we extend the set of aot-initialized classes beyond JEP 483)
    return;
  } else {
    // In this case, the special subgraph should contain a few specific types
    GrowableArray<Klass*>* klasses = _dump_time_special_subgraph->subgraph_object_klasses();
    int num = klasses->length();
    for (int i = 0; i < num; i++) {
      Klass* subgraph_k = klasses->at(i);
      Symbol* name = subgraph_k->name();
      if (subgraph_k->is_instance_klass() &&
          name != vmSymbols::java_lang_Class() &&
          name != vmSymbols::java_lang_String() &&
          name != vmSymbols::java_lang_ArithmeticException() &&
          name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
          name != vmSymbols::java_lang_ArrayStoreException() &&
          name != vmSymbols::java_lang_ClassCastException() &&
          name != vmSymbols::java_lang_InternalError() &&
          name != vmSymbols::java_lang_NullPointerException()) {
        ResourceMark rm;
        fatal("special subgraph cannot have objects of type %s", subgraph_k->external_name());
      }
    }
  }
}

HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
HeapShared::PendingOop HeapShared::_object_being_archived;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != nullptr;
}

void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put_when_absent(obj, true);
  _seen_objects_table->maybe_grow();
  ++_num_new_walked_objs;
}

void HeapShared::start_recording_subgraph(InstanceKlass* k, const char* class_name, bool is_full_module_graph) {
  log_info(cds, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_subgraph_info(k, is_full_module_graph);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

void HeapShared::done_recording_subgraph(InstanceKlass* k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
                                 _num_old_recorded_klasses;
  log_info(cds, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields can never be overloaded");
      if (is_reference_type(fd->field_type())) {
        _found = true;
        _offset = fd->offset();
      }
    }
  }
  bool found() { return _found; }
  int offset() { return _offset; }
};

void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            TRAPS) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);
    ResourceMark rm; // for stringStream::as_string() etc.

#ifndef PRODUCT
    bool is_test_class = (ArchiveHeapTestClass != nullptr) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
    const char* test_class_name = ArchiveHeapTestClass;
#else
    bool is_test_class = false;
    const char* test_class_name = ""; // avoid C++ printf-check warnings.
#endif

    if (is_test_class) {
      log_warning(cds)("Loading ArchiveHeapTestClass %s ...", test_class_name);
    }

    Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
      stringStream st;
      st.print("Failed to initialize archive heap: %s cannot be loaded by the boot loader", info->klass_name);
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
    }

    if (!k->is_instance_klass()) {
      stringStream st;
      st.print("Failed to initialize archive heap: %s is not an instance class", info->klass_name);
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
    }

    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(ik->is_shared_boot_class(),
           "Only support boot classes");

    if (is_test_class) {
      if (ik->module()->is_named()) {
        // We don't want ArchiveHeapTestClass to be abused to easily load/initialize arbitrary
        // core-lib classes. You need to at least append to the bootclasspath.
        stringStream st;
        st.print("ArchiveHeapTestClass %s is not in unnamed module", test_class_name);
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
      }

      if (ik->package() != nullptr) {
        // This restriction makes HeapShared::is_a_test_class_in_unnamed_module() easy.
        stringStream st;
        st.print("ArchiveHeapTestClass %s is not in unnamed package", test_class_name);
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
      }
    } else {
      if (ik->module()->name() != vmSymbols::java_base()) {
        // We don't want to deal with cases when a module is unavailable at runtime.
        // FUTURE -- load from archived heap only when module graph has not changed
        // between dump and runtime.
        stringStream st;
        st.print("%s is not in java.base module", info->klass_name);
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
      }
    }

    if (is_test_class) {
      log_warning(cds)("Initializing ArchiveHeapTestClass %s ...", test_class_name);
    }
    ik->initialize(CHECK);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    if (!finder.found()) {
      stringStream st;
      st.print("Unable to find the static T_OBJECT field %s::%s", info->klass_name, info->field_name);
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
    }

    info->klass = ik;
    info->offset = finder.offset();
  }
}

void HeapShared::init_subgraph_entry_fields(TRAPS) {
  assert(CDSConfig::is_dumping_heap(), "must be");
  _dump_time_subgraph_info_table = new (mtClass) DumpTimeKlassSubGraphInfoTable();
  init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK);
  if (CDSConfig::is_dumping_full_module_graph()) {
    init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK);
  }
}

#ifndef PRODUCT
void HeapShared::setup_test_class(const char* test_class_name) {
  ArchivableStaticFieldInfo* p = archive_subgraph_entry_fields;
  int num_slots = sizeof(archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
  assert(p[num_slots - 2].klass_name == nullptr, "must have empty slot that's patched below");
  assert(p[num_slots - 1].klass_name == nullptr, "must have empty slot that marks the end of the list");

  if (test_class_name != nullptr) {
    p[num_slots - 2].klass_name = test_class_name;
    p[num_slots - 2].field_name = ARCHIVE_TEST_FIELD_NAME;
  }
}

// See if ik is one of the test classes that are pulled in by -XX:ArchiveHeapTestClass
// at runtime. This may be called before the module system is initialized so
// we cannot rely on InstanceKlass::module(), etc.
bool HeapShared::is_a_test_class_in_unnamed_module(Klass* ik) {
  if (_test_class != nullptr) {
    if (ik == _test_class) {
      return true;
    }
    Array<Klass*>* klasses = _test_class_record->subgraph_object_klasses();
    if (klasses == nullptr) {
      return false;
    }

    for (int i = 0; i < klasses->length(); i++) {
      Klass* k = klasses->at(i);
      if (k == ik) {
        Symbol* name;
        if (k->is_instance_klass()) {
          name = InstanceKlass::cast(k)->name();
        } else if (k->is_objArray_klass()) {
          Klass* bk = ObjArrayKlass::cast(k)->bottom_klass();
          if (!bk->is_instance_klass()) {
            return false;
          }
          name = bk->name();
        } else {
          return false;
        }

        // See KlassSubGraphInfo::check_allowed_klass() - we only allow test classes
        // to be:
        //   (A) java.base classes (which must not be in the unnamed module)
        //   (B) test classes which must be in the unnamed package of the unnamed module.
        // So if we see a '/' character in the class name, it must be in (A);
        // otherwise it must be in (B).
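        // For example (illustrative): "java/lang/String" contains '/' and
        // falls under (A), while a top-level test class name such as
        // "MyHeapTest" does not, and falls under (B).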
        if (name->index_of_at(0, "/", 1) >= 0) {
          return false; // (A)
        }

        return true; // (B)
      }
    }
  }

  return false;
}

void HeapShared::initialize_test_class_from_archive(JavaThread* current) {
  Klass* k = _test_class;
  if (k != nullptr && ArchiveHeapLoader::is_in_use()) {
    JavaThread* THREAD = current;
    ExceptionMark em(THREAD);
    const ArchivedKlassSubGraphInfoRecord* record =
      resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);

    // The _test_class is in the unnamed module, so it can't call CDS.initializeFromArchive()
    // from its <clinit> method. So we set up its "archivedObjects" field first, before
    // calling its <clinit>. This is not strictly clean, but it's a convenient way to write unit
    // test cases (see test/hotspot/jtreg/runtime/cds/appcds/cacheObject/ArchiveHeapTestClass.java).
    if (record != nullptr) {
      init_archived_fields_for(k, record);
    }
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
  }
}
#endif

void HeapShared::init_for_dumping(TRAPS) {
  if (CDSConfig::is_dumping_heap()) {
    setup_test_class(ArchiveHeapTestClass);
    _dumped_interned_strings = new (mtClass) DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
    init_subgraph_entry_fields(CHECK);
  }
}

void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field.
  // [2] Create a list of all the classes of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    ContextMark cm(klass_name);
    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
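    // For example (illustrative): if fields[] contains {X, f1}, {X, f2},
    // {Y, g} in that order, f1 and f2 are recorded in a single pass for X,
    // followed by a separate pass for Y.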
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      ContextMark cm(f->field_name);
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(cds, heap)("Archived subgraph records = %d",
                      _num_total_subgraph_recordings);
  log_info(cds, heap)(" Walked %d objects", _num_total_walked_objs);
  log_info(cds, heap)(" Archived %d objects", _num_total_archived_objs);
  log_info(cds, heap)(" Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(cds, heap)(" Verified %d references", _num_total_verifications);
#endif
}

// Not all the strings in the global StringTable are dumped into the archive, because
// some of those strings may be referenced only by classes that are excluded from
// the archive. We need to explicitly mark the strings that are:
// [1] used by classes that WILL be archived;
// [2] included in the SharedArchiveConfigFile.
void HeapShared::add_to_dumped_interned_strings(oop string) {
  assert_at_safepoint(); // DumpedInternedStrings uses raw oops
  assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
  bool created;
  _dumped_interned_strings->put_if_absent(string, true, &created);
  if (created) {
    // Prevent string deduplication from changing the value field to
    // something not in the archive.
    java_lang_String::set_deduplication_forbidden(string);
    _dumped_interned_strings->maybe_grow();
  }
}

bool HeapShared::is_dumped_interned_string(oop o) {
  return _dumped_interned_strings->get(o) != nullptr;
}

void HeapShared::debug_trace() {
  ResourceMark rm;
  oop referrer = _object_being_archived.referrer();
  if (referrer != nullptr) {
    LogStream ls(Log(cds, heap)::error());
    ls.print_cr("Reference trace");
    CDSHeapVerifier::trace_to_root(&ls, referrer);
  }
}

#ifndef PRODUCT
// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
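// Each bit in the oopmap corresponds to one oop-sized (or narrowOop-sized)
// slot relative to the start of the region. For example (illustrative), with
// 4-byte narrowOops, a non-null pointer at byte offset 24 sets bit 24 / 4 = 6.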
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  void* _start;
  BitMap* _oopmap;
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(void* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual void do_oop(narrowOop* p) {
    assert(UseCompressedOops, "sanity");
    _num_total_oops++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      size_t idx = p - (narrowOop*)_start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops++;
    }
  }
  virtual void do_oop(oop* p) {
    assert(!UseCompressedOops, "sanity");
    _num_total_oops++;
    if ((*p) != nullptr) {
      size_t idx = p - (oop*)_start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops++;
    }
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops() const { return _num_null_oops; }
};
#endif

void HeapShared::count_allocation(size_t size) {
  _total_obj_count++;
  _total_obj_size += size;
  for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
    if (size <= (size_t(1) << i)) {
      _alloc_count[i]++;
      _alloc_size[i] += size;
      return;
    }
  }
}

static double avg_size(size_t size, size_t count) {
  double avg = 0;
  if (count > 0) {
    avg = double(size * HeapWordSize) / double(count);
  }
  return avg;
}

void HeapShared::print_stats() {
  size_t huge_count = _total_obj_count;
  size_t huge_size = _total_obj_size;

  for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
    size_t byte_size_limit = (size_t(1) << i) * HeapWordSize;
    size_t count = _alloc_count[i];
    size_t size = _alloc_size[i];
    log_info(cds, heap)("%8zu objects are <= %-6zu"
                        " bytes (total %8zu bytes, avg %8.1f bytes)",
                        count, byte_size_limit, size * HeapWordSize, avg_size(size, count));
    huge_count -= count;
    huge_size -= size;
  }

  log_info(cds, heap)("%8zu huge objects (total %8zu bytes"
                      ", avg %8.1f bytes)",
                      huge_count, huge_size * HeapWordSize,
                      avg_size(huge_size, huge_count));
  log_info(cds, heap)("%8zu total objects (total %8zu bytes"
                      ", avg %8.1f bytes)",
                      _total_obj_count, _total_obj_size * HeapWordSize,
                      avg_size(_total_obj_size, _total_obj_count));
}

bool HeapShared::is_archived_boot_layer_available(JavaThread* current) {
  TempNewSymbol klass_name = SymbolTable::new_symbol(ARCHIVED_BOOT_LAYER_CLASS);
  InstanceKlass* k = SystemDictionary::find_instance_klass(current, klass_name, Handle());
  if (k == nullptr) {
    return false;
  } else {
    TempNewSymbol field_name = SymbolTable::new_symbol(ARCHIVED_BOOT_LAYER_FIELD);
    TempNewSymbol field_signature = SymbolTable::new_symbol("Ljdk/internal/module/ArchivedBootLayer;");
    fieldDescriptor fd;
    if (k->find_field(field_name, field_signature, true, &fd) != nullptr) {
      oop m = k->java_mirror();
      oop f = m->obj_field(fd.offset());
      if (CompressedOops::is_null(f)) {
        return false;
      }
    } else {
      return false;
    }
  }
  return true;
}

#endif // INCLUDE_CDS_JAVA_HEAP