1 /* 2 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
22 * 23 */ 24 25 #include "cds/aotArtifactFinder.hpp" 26 #include "cds/aotCacheAccess.hpp" 27 #include "cds/aotClassInitializer.hpp" 28 #include "cds/aotClassLocation.hpp" 29 #include "cds/aotConstantPoolResolver.hpp" 30 #include "cds/aotLogging.hpp" 31 #include "cds/aotMetaspace.hpp" 32 #include "cds/aotReferenceObjSupport.hpp" 33 #include "cds/archiveBuilder.hpp" 34 #include "cds/archiveHeapLoader.hpp" 35 #include "cds/archiveHeapWriter.hpp" 36 #include "cds/archiveUtils.hpp" 37 #include "cds/cdsConfig.hpp" 38 #include "cds/cdsEnumKlass.hpp" 39 #include "cds/cdsHeapVerifier.hpp" 40 #include "cds/heapShared.hpp" 41 #include "cds/regeneratedClasses.hpp" 42 #include "classfile/classLoaderData.hpp" 43 #include "classfile/javaClasses.inline.hpp" 44 #include "classfile/modules.hpp" 45 #include "classfile/stringTable.hpp" 46 #include "classfile/symbolTable.hpp" 47 #include "classfile/systemDictionary.hpp" 48 #include "classfile/systemDictionaryShared.hpp" 49 #include "classfile/vmClasses.hpp" 50 #include "classfile/vmSymbols.hpp" 51 #include "gc/shared/collectedHeap.hpp" 52 #include "gc/shared/gcLocker.hpp" 53 #include "gc/shared/gcVMOperations.hpp" 54 #include "logging/log.hpp" 55 #include "logging/logStream.hpp" 56 #include "memory/iterator.inline.hpp" 57 #include "memory/resourceArea.hpp" 58 #include "memory/universe.hpp" 59 #include "oops/compressedOops.inline.hpp" 60 #include "oops/fieldStreams.inline.hpp" 61 #include "oops/objArrayOop.inline.hpp" 62 #include "oops/oop.inline.hpp" 63 #include "oops/oopHandle.inline.hpp" 64 #include "oops/typeArrayOop.inline.hpp" 65 #include "prims/jvmtiExport.hpp" 66 #include "runtime/arguments.hpp" 67 #include "runtime/fieldDescriptor.inline.hpp" 68 #include "runtime/init.hpp" 69 #include "runtime/javaCalls.hpp" 70 #include "runtime/mutexLocker.hpp" 71 #include "runtime/safepointVerifiers.hpp" 72 #include "utilities/bitMap.inline.hpp" 73 #include "utilities/copy.hpp" 74 #if INCLUDE_G1GC 75 #include "gc/g1/g1CollectedHeap.hpp" 76 
#endif 77 78 #if INCLUDE_CDS_JAVA_HEAP 79 80 struct ArchivableStaticFieldInfo { 81 const char* klass_name; 82 const char* field_name; 83 InstanceKlass* klass; 84 int offset; 85 BasicType type; 86 87 ArchivableStaticFieldInfo(const char* k, const char* f) 88 : klass_name(k), field_name(f), klass(nullptr), offset(0), type(T_ILLEGAL) {} 89 90 bool valid() { 91 return klass_name != nullptr; 92 } 93 }; 94 95 class HeapShared::ContextMark : public StackObj { 96 ResourceMark rm; 97 public: 98 ContextMark(const char* c) : rm{} { 99 _context->push(c); 100 } 101 ~ContextMark() { 102 _context->pop(); 103 } 104 }; 105 106 DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr; 107 108 size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS]; 109 size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS]; 110 size_t HeapShared::_total_obj_count; 111 size_t HeapShared::_total_obj_size; 112 113 #ifndef PRODUCT 114 #define ARCHIVE_TEST_FIELD_NAME "archivedObjects" 115 static Array<char>* _archived_ArchiveHeapTestClass = nullptr; 116 static const char* _test_class_name = nullptr; 117 static Klass* _test_class = nullptr; 118 static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr; 119 #endif 120 121 122 // 123 // If you add new entries to the following tables, you should know what you're doing! 
124 // 125 126 static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = { 127 {"java/lang/Integer$IntegerCache", "archivedCache"}, 128 {"java/lang/Long$LongCache", "archivedCache"}, 129 {"java/lang/Byte$ByteCache", "archivedCache"}, 130 {"java/lang/Short$ShortCache", "archivedCache"}, 131 {"java/lang/Character$CharacterCache", "archivedCache"}, 132 {"java/util/jar/Attributes$Name", "KNOWN_NAMES"}, 133 {"sun/util/locale/BaseLocale", "constantBaseLocales"}, 134 {"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"}, 135 {"java/util/ImmutableCollections", "archivedObjects"}, 136 {"java/lang/ModuleLayer", "EMPTY_LAYER"}, 137 {"java/lang/module/Configuration", "EMPTY_CONFIGURATION"}, 138 {"jdk/internal/math/FDBigInteger", "archivedCaches"}, 139 {"java/lang/reflect/Proxy$ProxyBuilder", "archivedData"}, // FIXME -- requires AOTClassLinking 140 141 #ifndef PRODUCT 142 {nullptr, nullptr}, // Extra slot for -XX:ArchiveHeapTestClass 143 #endif 144 {nullptr, nullptr}, 145 }; 146 147 // full module graph 148 static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = { 149 {"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"}, 150 {ARCHIVED_BOOT_LAYER_CLASS, ARCHIVED_BOOT_LAYER_FIELD}, 151 {"java/lang/Module$ArchivedData", "archivedData"}, 152 {nullptr, nullptr}, 153 }; 154 155 KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph; 156 ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph; 157 GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_pending_roots = nullptr; 158 GrowableArrayCHeap<const char*, mtClassShared>* HeapShared::_context = nullptr; 159 GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr; 160 int HeapShared::_root_segment_max_size_elems; 161 OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1]; 162 MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr; 163 164 static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo 
fields[], InstanceKlass* ik) { 165 for (int i = 0; fields[i].valid(); i++) { 166 if (fields[i].klass == ik) { 167 return true; 168 } 169 } 170 return false; 171 } 172 173 bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) { 174 return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) || 175 is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik); 176 } 177 178 oop HeapShared::CachedOopInfo::orig_referrer() const { 179 return _orig_referrer.resolve(); 180 } 181 182 void HeapShared::rehash_archived_object_cache() { 183 if (!CDSConfig::is_dumping_heap()) { 184 return; 185 } 186 assert(SafepointSynchronize::is_at_safepoint() || 187 JavaThread::current()->is_in_no_safepoint_scope(), "sanity"); 188 189 ArchivedObjectCache* new_cache = 190 new (mtClass)ArchivedObjectCache(archived_object_cache()->table_size(), MAX_TABLE_SIZE); 191 192 archived_object_cache()->iterate_all([&](OopHandle o, CachedOopInfo& info) { 193 new_cache->put_when_absent(o, info); 194 }); 195 196 delete _archived_object_cache; 197 _archived_object_cache = new_cache; 198 } 199 200 unsigned HeapShared::oop_hash(oop const& p) { 201 assert(SafepointSynchronize::is_at_safepoint() || 202 JavaThread::current()->is_in_no_safepoint_scope(), "sanity"); 203 // Do not call p->identity_hash() as that will update the 204 // object header. 
205 return primitive_hash(cast_from_oop<intptr_t>(p)); 206 } 207 208 unsigned int HeapShared::oop_handle_hash_raw(const OopHandle& oh) { 209 return oop_hash(oh.resolve()); 210 } 211 212 unsigned int HeapShared::oop_handle_hash(const OopHandle& oh) { 213 oop o = oh.resolve(); 214 if (o == nullptr) { 215 return 0; 216 } else { 217 return o->identity_hash(); 218 } 219 } 220 221 bool HeapShared::oop_handle_equals(const OopHandle& a, const OopHandle& b) { 222 return a.resolve() == b.resolve(); 223 } 224 225 static void reset_states(oop obj, TRAPS) { 226 Handle h_obj(THREAD, obj); 227 InstanceKlass* klass = InstanceKlass::cast(obj->klass()); 228 TempNewSymbol method_name = SymbolTable::new_symbol("resetArchivedStates"); 229 Symbol* method_sig = vmSymbols::void_method_signature(); 230 231 while (klass != nullptr) { 232 Method* method = klass->find_method(method_name, method_sig); 233 if (method != nullptr) { 234 assert(method->is_private(), "must be"); 235 if (log_is_enabled(Debug, aot)) { 236 ResourceMark rm(THREAD); 237 log_debug(aot)(" calling %s", method->name_and_sig_as_C_string()); 238 } 239 JavaValue result(T_VOID); 240 JavaCalls::call_special(&result, h_obj, klass, 241 method_name, method_sig, CHECK); 242 } 243 klass = klass->super(); 244 } 245 } 246 247 void HeapShared::reset_archived_object_states(TRAPS) { 248 assert(CDSConfig::is_dumping_heap(), "dump-time only"); 249 log_debug(aot)("Resetting platform loader"); 250 reset_states(SystemDictionary::java_platform_loader(), CHECK); 251 log_debug(aot)("Resetting system loader"); 252 reset_states(SystemDictionary::java_system_loader(), CHECK); 253 254 // Clean up jdk.internal.loader.ClassLoaders::bootLoader(), which is not 255 // directly used for class loading, but rather is used by the core library 256 // to keep track of resources, etc, loaded by the null class loader. 
257 // 258 // Note, this object is non-null, and is not the same as 259 // ClassLoaderData::the_null_class_loader_data()->class_loader(), 260 // which is null. 261 log_debug(aot)("Resetting boot loader"); 262 JavaValue result(T_OBJECT); 263 JavaCalls::call_static(&result, 264 vmClasses::jdk_internal_loader_ClassLoaders_klass(), 265 vmSymbols::bootLoader_name(), 266 vmSymbols::void_BuiltinClassLoader_signature(), 267 CHECK); 268 Handle boot_loader(THREAD, result.get_oop()); 269 reset_states(boot_loader(), CHECK); 270 } 271 272 HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr; 273 274 bool HeapShared::has_been_archived(oop obj) { 275 assert(CDSConfig::is_dumping_heap(), "dump-time only"); 276 OopHandle oh(&obj); 277 return archived_object_cache()->get(oh) != nullptr; 278 } 279 280 int HeapShared::append_root(oop obj) { 281 assert(CDSConfig::is_dumping_heap(), "dump-time only"); 282 if (obj != nullptr) { 283 assert(has_been_archived(obj), "must be"); 284 } 285 // No GC should happen since we aren't scanning _pending_roots. 
286 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); 287 288 OopHandle oh(Universe::vm_global(), obj); 289 return _pending_roots->append(oh); 290 } 291 292 objArrayOop HeapShared::root_segment(int segment_idx) { 293 if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) { 294 assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread"); 295 } else { 296 assert(CDSConfig::is_using_archive(), "must be"); 297 } 298 299 objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve(); 300 assert(segment != nullptr, "should have been initialized"); 301 return segment; 302 } 303 304 class OrigToScratchObjectTable: public HashTable<OopHandle, OopHandle, 305 36137, // prime number 306 AnyObj::C_HEAP, 307 mtClassShared, 308 HeapShared::oop_handle_hash, 309 HeapShared::oop_handle_equals> {}; 310 311 static OrigToScratchObjectTable* _orig_to_scratch_object_table = nullptr; 312 313 void HeapShared::track_scratch_object(oop orig_obj, oop scratch_obj) { 314 MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag); 315 if (_orig_to_scratch_object_table == nullptr) { 316 _orig_to_scratch_object_table = new (mtClass)OrigToScratchObjectTable(); 317 } 318 319 OopHandle orig_h(Universe::vm_global(), orig_obj); 320 OopHandle scratch_h(Universe::vm_global(), scratch_obj); 321 _orig_to_scratch_object_table->put_when_absent(orig_h, scratch_h); 322 } 323 324 oop HeapShared::orig_to_scratch_object(oop orig_obj) { 325 MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag); 326 if (_orig_to_scratch_object_table != nullptr) { 327 OopHandle orig(&orig_obj); 328 OopHandle* v = _orig_to_scratch_object_table->get(orig); 329 if (v != nullptr) { 330 return v->resolve(); 331 } 332 } 333 return nullptr; 334 } 335 336 // Permanent oops are used to support AOT-compiled methods, which may have in-line references 337 // to Strings and MH oops. 
338 // 339 // At runtime, these oops are stored in _runtime_permanent_oops (which keeps them alive forever) 340 // and are accssed vis AOTCacheAccess::get_archived_object(int). 341 struct PermanentOopInfo { 342 int _index; // Gets assigned only if HeapShared::get_archived_object_permanent_index() has been called on the object 343 int _heap_offset; // Offset of the object from the bottom of the archived heap. 344 PermanentOopInfo(int index, int heap_offset) : _index(index), _heap_offset(heap_offset) {} 345 }; 346 347 class PermanentOopTable: public HashTable<OopHandle, PermanentOopInfo, 348 36137, // prime number 349 AnyObj::C_HEAP, 350 mtClassShared, 351 HeapShared::oop_handle_hash, 352 HeapShared::oop_handle_equals> {}; 353 354 static int _dumptime_permanent_oop_count = 0; 355 static PermanentOopTable* _dumptime_permanent_oop_table = nullptr; 356 static GrowableArrayCHeap<OopHandle, mtClassShared>* _runtime_permanent_oops = nullptr; 357 358 // ArchiveHeapWriter adds each archived heap object to _dumptime_permanent_oop_table, 359 // so we can remember their offset (from the bottom of the archived heap). 360 void HeapShared::add_to_permanent_oop_table(oop obj, int offset) { 361 assert_at_safepoint(); 362 if (_dumptime_permanent_oop_table == nullptr) { 363 _dumptime_permanent_oop_table = new (mtClass)PermanentOopTable(); 364 } 365 366 PermanentOopInfo info(-1, offset); 367 OopHandle oh(Universe::vm_global(), obj); 368 _dumptime_permanent_oop_table->put_when_absent(oh, info); 369 } 370 371 // A permanent index is assigned to an archived object ONLY when 372 // the AOT compiler calls this function. 
373 int HeapShared::get_archived_object_permanent_index(oop obj) { 374 MutexLocker ml(ArchivedObjectTables_lock, Mutex::_no_safepoint_check_flag); 375 376 if (!CDSConfig::is_dumping_heap()) { 377 return -1; // Called by the Leyden old workflow 378 } 379 if (_dumptime_permanent_oop_table == nullptr) { 380 return -1; 381 } 382 383 if (_orig_to_scratch_object_table != nullptr) { 384 OopHandle orig(&obj); 385 OopHandle* v = _orig_to_scratch_object_table->get(orig); 386 if (v != nullptr) { 387 obj = v->resolve(); 388 } 389 } 390 391 OopHandle tmp(&obj); 392 PermanentOopInfo* info = _dumptime_permanent_oop_table->get(tmp); 393 if (info == nullptr) { 394 return -1; 395 } else { 396 if (info->_index < 0) { 397 info->_index = _dumptime_permanent_oop_count++; 398 } 399 return info->_index; 400 } 401 } 402 403 oop HeapShared::get_archived_object(int permanent_index) { 404 assert(permanent_index >= 0, "sanity"); 405 assert(ArchiveHeapLoader::is_in_use(), "sanity"); 406 assert(_runtime_permanent_oops != nullptr, "sanity"); 407 408 return _runtime_permanent_oops->at(permanent_index).resolve(); 409 } 410 411 // Remember all archived heap objects that have a permanent index. 412 // table[i] = offset of oop whose permanent index is i. 413 void CachedCodeDirectoryInternal::dumptime_init_internal() { 414 const int count = _dumptime_permanent_oop_count; 415 if (count == 0) { 416 // Avoid confusing CDS code with zero-sized tables, just return. 
417 log_info(cds)("No permanent oops"); 418 _permanent_oop_count = count; 419 _permanent_oop_offsets = nullptr; 420 return; 421 } 422 423 int* table = (int*)AOTCacheAccess::allocate_aot_code_region(count * sizeof(int)); 424 for (int i = 0; i < count; i++) { 425 table[count] = -1; 426 } 427 _dumptime_permanent_oop_table->iterate([&](OopHandle o, PermanentOopInfo& info) { 428 int index = info._index; 429 if (index >= 0) { 430 assert(index < count, "sanity"); 431 table[index] = info._heap_offset; 432 } 433 return true; // continue 434 }); 435 436 for (int i = 0; i < count; i++) { 437 assert(table[i] >= 0, "must be"); 438 } 439 440 log_info(cds)("Dumped %d permanent oops", count); 441 442 _permanent_oop_count = count; 443 AOTCacheAccess::set_pointer(&_permanent_oop_offsets, table); 444 } 445 446 // This is called during the bootstrap of the production run, before any GC can happen. 447 // Record each permanent oop in a OopHandle for GC safety. 448 void CachedCodeDirectoryInternal::runtime_init_internal() { 449 int count = _permanent_oop_count; 450 int* table = _permanent_oop_offsets; 451 _runtime_permanent_oops = new GrowableArrayCHeap<OopHandle, mtClassShared>(); 452 for (int i = 0; i < count; i++) { 453 oop obj = ArchiveHeapLoader::oop_from_offset(table[i]); 454 OopHandle oh(Universe::vm_global(), obj); 455 _runtime_permanent_oops->append(oh); 456 } 457 }; 458 459 void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) { 460 assert(_root_segment_max_size_elems > 0, "sanity"); 461 462 // Try to avoid divisions for the common case. 
  if (idx < _root_segment_max_size_elems) {
    // Common case: index falls in the first segment; no division needed.
    seg_idx = 0;
    int_idx = idx;
  } else {
    seg_idx = idx / _root_segment_max_size_elems;
    int_idx = idx % _root_segment_max_size_elems;
  }

  assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
         "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}

// Returns the archived root object stored at the given index of the
// segmented root arrays. If clear is true, the slot is nulled out after
// the read so the root can be garbage collected.
oop HeapShared::get_root(int index, bool clear) {
  assert(index >= 0, "sanity");
  assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
  assert(!_root_segments->is_empty(), "must have loaded shared heap");
  int seg_idx, int_idx;
  get_segment_indexes(index, seg_idx, int_idx);
  oop result = root_segment(seg_idx)->obj_at(int_idx);
  if (clear) {
    clear_root(index);
  }
  return result;
}

// Null out the root slot at the given index so the referenced object is no
// longer kept alive by the archive's root arrays.
void HeapShared::clear_root(int index) {
  assert(index >= 0, "sanity");
  assert(CDSConfig::is_using_archive(), "must be");
  if (ArchiveHeapLoader::is_in_use()) {
    int seg_idx, int_idx;
    get_segment_indexes(index, seg_idx, int_idx);
    if (log_is_enabled(Debug, aot, heap)) {
      oop old = root_segment(seg_idx)->obj_at(int_idx);
      log_debug(aot, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
    }
    root_segment(seg_idx)->obj_at_put(int_idx, nullptr);
  }
}

// Record obj (reached from referrer, or a subgraph root if referrer is null)
// as an archived heap object. Returns false only if obj is too large to be
// written into the archived heap.
bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");

  assert(!obj->is_stackChunk(), "do not archive stack chunks");
  if (has_been_archived(obj)) {
    return true;
  }

  if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
    log_debug(aot, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: %zu",
                         p2i(obj), obj->size());
    debug_trace();
    return false;
  } else {
    count_allocation(obj->size());
ArchiveHeapWriter::add_source_obj(obj); 519 CachedOopInfo info = make_cached_oop_info(obj, referrer); 520 521 OopHandle oh(Universe::vm_global(), obj); 522 archived_object_cache()->put_when_absent(oh, info); 523 archived_object_cache()->maybe_grow(); 524 mark_native_pointers(obj); 525 526 Klass* k = obj->klass(); 527 if (k->is_instance_klass()) { 528 // Whenever we see a non-array Java object of type X, we mark X to be aot-initialized. 529 // This ensures that during the production run, whenever Java code sees a cached object 530 // of type X, we know that X is already initialized. (see TODO comment below ...) 531 532 if (InstanceKlass::cast(k)->is_enum_subclass() 533 // We can't rerun <clinit> of enum classes (see cdsEnumKlass.cpp) so 534 // we must store them as AOT-initialized. 535 || (subgraph_info == _dump_time_special_subgraph)) 536 // TODO: we do this only for the special subgraph for now. Extending this to 537 // other subgraphs would require more refactoring of the core library (such as 538 // move some initialization logic into runtimeSetup()). 539 // 540 // For the other subgraphs, we have a weaker mechanism to ensure that 541 // all classes in a subgraph are initialized before the subgraph is programmatically 542 // returned from jdk.internal.misc.CDS::initializeFromArchive(). 543 // See HeapShared::initialize_from_archived_subgraph(). 
544 { 545 AOTArtifactFinder::add_aot_inited_class(InstanceKlass::cast(k)); 546 } 547 548 if (java_lang_Class::is_instance(obj)) { 549 Klass* mirror_k = java_lang_Class::as_Klass(obj); 550 if (mirror_k != nullptr) { 551 AOTArtifactFinder::add_cached_class(mirror_k); 552 } 553 } else if (java_lang_invoke_ResolvedMethodName::is_instance(obj)) { 554 Method* m = java_lang_invoke_ResolvedMethodName::vmtarget(obj); 555 if (m != nullptr) { 556 if (RegeneratedClasses::has_been_regenerated(m)) { 557 m = RegeneratedClasses::get_regenerated_object(m); 558 } 559 InstanceKlass* method_holder = m->method_holder(); 560 AOTArtifactFinder::add_cached_class(method_holder); 561 } 562 } 563 } 564 565 if (log_is_enabled(Debug, aot, heap)) { 566 ResourceMark rm; 567 LogTarget(Debug, aot, heap) log; 568 LogStream out(log); 569 out.print("Archived heap object " PTR_FORMAT " : %s ", 570 p2i(obj), obj->klass()->external_name()); 571 if (java_lang_Class::is_instance(obj)) { 572 Klass* k = java_lang_Class::as_Klass(obj); 573 if (k != nullptr) { 574 out.print("%s", k->external_name()); 575 } else { 576 out.print("primitive"); 577 } 578 } 579 out.cr(); 580 } 581 582 return true; 583 } 584 } 585 586 class MetaspaceObjToOopHandleTable: public HashTable<MetaspaceObj*, OopHandle, 587 36137, // prime number 588 AnyObj::C_HEAP, 589 mtClassShared> { 590 public: 591 oop get_oop(MetaspaceObj* ptr) { 592 MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag); 593 OopHandle* handle = get(ptr); 594 if (handle != nullptr) { 595 return handle->resolve(); 596 } else { 597 return nullptr; 598 } 599 } 600 void set_oop(MetaspaceObj* ptr, oop o) { 601 MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag); 602 OopHandle handle(Universe::vm_global(), o); 603 bool is_new = put(ptr, handle); 604 assert(is_new, "cannot set twice"); 605 } 606 void remove_oop(MetaspaceObj* ptr) { 607 MutexLocker ml(ScratchObjects_lock, Mutex::_no_safepoint_check_flag); 608 OopHandle* handle = get(ptr); 609 if 
(handle != nullptr) {
      // Release the global handle before removing the entry, so the
      // OopHandle's backing slot is not leaked.
      handle->release(Universe::vm_global());
      remove(ptr);
    }
  }
};

// Remember the scratch resolved_references array for src's constant pool,
// but only for classes loaded by the builtin loaders (others are not archived).
void HeapShared::add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) {
  if (SystemDictionaryShared::is_builtin_loader(src->pool_holder()->class_loader_data())) {
    _scratch_objects_table->set_oop(src, dest);
  }
}

// Returns the scratch resolved_references array recorded for src, or null.
objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
  return (objArrayOop)_scratch_objects_table->get_oop(src);
}

// One-time setup of the dump-time data structures.
void HeapShared::init_dumping() {
  _scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
  _pending_roots = new GrowableArrayCHeap<OopHandle, mtClassShared>(500);
}

// Create a scratch mirror for each primitive type (T_BOOLEAN .. T_VOID)
// and remember the mapping from the live basic-type mirror to its scratch copy.
void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    BasicType bt = (BasicType)i;
    if (!is_reference_type(bt)) {
      oop m = java_lang_Class::create_basic_type_mirror(type2name(bt), bt, CHECK);
      _scratch_basic_type_mirrors[i] = OopHandle(Universe::vm_global(), m);
      track_scratch_object(Universe::java_mirror(bt), m);
    }
  }
}

// Given java_mirror that represents a (primitive or reference) type T,
// return the "scratch" version that represents the same type T.
// Note that java_mirror itself will be returned if it's already a
// scratch mirror.
//
// See java_lang_Class::create_scratch_mirror() for more info.
648 oop HeapShared::scratch_java_mirror(oop java_mirror) { 649 assert(java_lang_Class::is_instance(java_mirror), "must be"); 650 651 for (int i = T_BOOLEAN; i < T_VOID+1; i++) { 652 BasicType bt = (BasicType)i; 653 if (!is_reference_type(bt)) { 654 if (_scratch_basic_type_mirrors[i].resolve() == java_mirror) { 655 return java_mirror; 656 } 657 } 658 } 659 660 if (java_lang_Class::is_primitive(java_mirror)) { 661 return scratch_java_mirror(java_lang_Class::as_BasicType(java_mirror)); 662 } else { 663 return scratch_java_mirror(java_lang_Class::as_Klass(java_mirror)); 664 } 665 } 666 667 oop HeapShared::scratch_java_mirror(BasicType t) { 668 assert((uint)t < T_VOID+1, "range check"); 669 assert(!is_reference_type(t), "sanity"); 670 return _scratch_basic_type_mirrors[t].resolve(); 671 } 672 673 oop HeapShared::scratch_java_mirror(Klass* k) { 674 return _scratch_objects_table->get_oop(k); 675 } 676 677 void HeapShared::set_scratch_java_mirror(Klass* k, oop mirror) { 678 track_scratch_object(k->java_mirror(), mirror); 679 _scratch_objects_table->set_oop(k, mirror); 680 } 681 682 void HeapShared::remove_scratch_objects(Klass* k) { 683 // Klass is being deallocated. Java mirror can still be alive, and it should not 684 // point to dead klass. We need to break the link from mirror to the Klass. 685 // See how InstanceKlass::deallocate_contents does it for normal mirrors. 
686 oop mirror = _scratch_objects_table->get_oop(k); 687 if (mirror != nullptr) { 688 java_lang_Class::set_klass(mirror, nullptr); 689 } 690 _scratch_objects_table->remove_oop(k); 691 if (k->is_instance_klass()) { 692 _scratch_objects_table->remove(InstanceKlass::cast(k)->constants()); 693 } 694 if (mirror != nullptr) { 695 OopHandle tmp(&mirror); 696 OopHandle* v = _orig_to_scratch_object_table->get(tmp); 697 if (v != nullptr) { 698 oop scratch_mirror = v->resolve(); 699 java_lang_Class::set_klass(scratch_mirror, nullptr); 700 _orig_to_scratch_object_table->remove(tmp); 701 } 702 } 703 } 704 705 //TODO: we eventually want a more direct test for these kinds of things. 706 //For example the JVM could record some bit of context from the creation 707 //of the klass, such as who called the hidden class factory. Using 708 //string compares on names is fragile and will break as soon as somebody 709 //changes the names in the JDK code. See discussion in JDK-8342481 for 710 //related ideas about marking AOT-related classes. 
711 bool HeapShared::is_lambda_form_klass(InstanceKlass* ik) { 712 return ik->is_hidden() && 713 (ik->name()->starts_with("java/lang/invoke/LambdaForm$MH+") || 714 ik->name()->starts_with("java/lang/invoke/LambdaForm$DMH+") || 715 ik->name()->starts_with("java/lang/invoke/LambdaForm$BMH+") || 716 ik->name()->starts_with("java/lang/invoke/LambdaForm$VH+")); 717 } 718 719 bool HeapShared::is_lambda_proxy_klass(InstanceKlass* ik) { 720 return ik->is_hidden() && (ik->name()->index_of_at(0, "$$Lambda+", 9) > 0); 721 } 722 723 bool HeapShared::is_string_concat_klass(InstanceKlass* ik) { 724 return ik->is_hidden() && ik->name()->starts_with("java/lang/String$$StringConcat"); 725 } 726 727 bool HeapShared::is_archivable_hidden_klass(InstanceKlass* ik) { 728 return CDSConfig::is_dumping_method_handles() && 729 (is_lambda_form_klass(ik) || is_lambda_proxy_klass(ik) || is_string_concat_klass(ik)); 730 } 731 732 733 void HeapShared::copy_and_rescan_aot_inited_mirror(InstanceKlass* ik) { 734 ik->set_has_aot_initialized_mirror(); 735 736 oop orig_mirror; 737 if (RegeneratedClasses::is_regenerated_object(ik)) { 738 InstanceKlass* orig_ik = RegeneratedClasses::get_original_object(ik); 739 precond(orig_ik->is_initialized()); 740 orig_mirror = orig_ik->java_mirror(); 741 } else { 742 precond(ik->is_initialized()); 743 orig_mirror = ik->java_mirror(); 744 } 745 746 oop m = scratch_java_mirror(ik); 747 int nfields = 0; 748 for (JavaFieldStream fs(ik); !fs.done(); fs.next()) { 749 if (fs.access_flags().is_static()) { 750 fieldDescriptor& fd = fs.field_descriptor(); 751 int offset = fd.offset(); 752 switch (fd.field_type()) { 753 case T_OBJECT: 754 case T_ARRAY: 755 { 756 oop field_obj = orig_mirror->obj_field(offset); 757 m->obj_field_put(offset, field_obj); 758 if (field_obj != nullptr) { 759 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, field_obj); 760 assert(success, "sanity"); 761 } 762 } 763 break; 764 case T_BOOLEAN: 765 m->bool_field_put(offset, 
orig_mirror->bool_field(offset)); 766 break; 767 case T_BYTE: 768 m->byte_field_put(offset, orig_mirror->byte_field(offset)); 769 break; 770 case T_SHORT: 771 m->short_field_put(offset, orig_mirror->short_field(offset)); 772 break; 773 case T_CHAR: 774 m->char_field_put(offset, orig_mirror->char_field(offset)); 775 break; 776 case T_INT: 777 m->int_field_put(offset, orig_mirror->int_field(offset)); 778 break; 779 case T_LONG: 780 m->long_field_put(offset, orig_mirror->long_field(offset)); 781 break; 782 case T_FLOAT: 783 m->float_field_put(offset, orig_mirror->float_field(offset)); 784 break; 785 case T_DOUBLE: 786 m->double_field_put(offset, orig_mirror->double_field(offset)); 787 break; 788 default: 789 ShouldNotReachHere(); 790 } 791 nfields ++; 792 } 793 } 794 795 oop class_data = java_lang_Class::class_data(orig_mirror); 796 java_lang_Class::set_class_data(m, class_data); 797 if (class_data != nullptr) { 798 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, class_data); 799 assert(success, "sanity"); 800 } 801 802 if (log_is_enabled(Debug, aot, init)) { 803 ResourceMark rm; 804 log_debug(aot, init)("copied %3d field(s) in aot-initialized mirror %s%s%s", nfields, ik->external_name(), 805 ik->is_hidden() ? " (hidden)" : "", 806 ik->is_enum_subclass() ? " (enum)" : ""); 807 } 808 } 809 810 void HeapShared::copy_java_mirror(oop orig_mirror, oop scratch_m) { 811 // We need to retain the identity_hash, because it may have been used by some hashtables 812 // in the shared heap. 
813 if (!orig_mirror->fast_no_hash_check()) { 814 intptr_t src_hash = orig_mirror->identity_hash(); 815 if (UseCompactObjectHeaders) { 816 narrowKlass nk = CompressedKlassPointers::encode(orig_mirror->klass()); 817 scratch_m->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash)); 818 } else { 819 scratch_m->set_mark(markWord::prototype().copy_set_hash(src_hash)); 820 } 821 assert(scratch_m->mark().is_unlocked(), "sanity"); 822 823 DEBUG_ONLY(intptr_t archived_hash = scratch_m->identity_hash()); 824 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash); 825 } 826 827 Klass* k = java_lang_Class::as_Klass(orig_mirror); // is null Universe::void_mirror(); 828 if (CDSConfig::is_dumping_reflection_data() && 829 k != nullptr && k->is_instance_klass() && 830 java_lang_Class::reflection_data(orig_mirror) != nullptr && 831 AOTConstantPoolResolver::can_archive_reflection_data(InstanceKlass::cast(k))) { 832 java_lang_Class::set_reflection_data(scratch_m, java_lang_Class::reflection_data(orig_mirror)); 833 } 834 } 835 836 static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) { 837 if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) { 838 objArrayOop rr = src_ik->constants()->resolved_references_or_null(); 839 if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) { 840 return HeapShared::scratch_resolved_references(src_ik->constants()); 841 } 842 } 843 return nullptr; 844 } 845 846 void HeapShared::archive_strings() { 847 oop shared_strings_array = StringTable::init_shared_strings_array(); 848 bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array); 849 assert(success, "shared strings array must not point to arrays or strings that are too large to archive"); 850 StringTable::set_shared_strings_array_index(append_root(shared_strings_array)); 851 } 852 853 int 
// Archive one of Universe's pre-allocated exception instances; returns its root index.
HeapShared::archive_exception_instance(oop exception) {
  bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, exception);
  assert(success, "sanity");
  return append_root(exception);
}

// Tell ArchiveHeapWriter which fields of orig_obj hold native (Metaspace) pointers
// rather than oops, so they can be relocated specially.
void HeapShared::mark_native_pointers(oop orig_obj) {
  if (java_lang_Class::is_instance(orig_obj)) {
    // A java.lang.Class mirror embeds pointers to its Klass and array Klass.
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
  } else if (java_lang_invoke_ResolvedMethodName::is_instance(orig_obj)) {
    // ResolvedMethodName embeds a pointer to a Method.
    ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_invoke_ResolvedMethodName::vmtarget_offset());
  }
}

// Out-parameters report whether the already-cached src_obj contains oop fields and/or
// native pointers. src_obj must have been entered into archived_object_cache().
void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
  OopHandle oh(&src_obj);
  CachedOopInfo* info = archived_object_cache()->get(oh);
  assert(info != nullptr, "must be");
  has_oop_pointers = info->has_oop_pointers();
  has_native_pointers = info->has_native_pointers();
}

// Record in the cache that src_obj contains native pointers.
void HeapShared::set_has_native_pointers(oop src_obj) {
  OopHandle oh(&src_obj);
  CachedOopInfo* info = archived_object_cache()->get(oh);
  assert(info != nullptr, "must be");
  info->set_has_native_pointers();
}

// Between start_scanning_for_oops() and end_scanning_for_oops(), we discover all Java heap objects that
// should be stored in the AOT cache. The scanning is coordinated by AOTArtifactFinder.
void HeapShared::start_scanning_for_oops() {
  {
    NoSafepointVerifier nsv;

    // The special subgraph doesn't belong to any class. We use Object_klass() here just
    // for convenience.
    _dump_time_special_subgraph = init_subgraph_info(vmClasses::Object_klass(), false);
    // _context records a trail of what is being scanned, for diagnostics on failure.
    _context = new GrowableArrayCHeap<const char*, mtClassShared>(250);

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    if (UseCompressedOops || UseG1GC) {
      aot_log_info(aot)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
                    UseCompressedOops ? p2i(CompressedOops::begin()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().start()),
                    UseCompressedOops ? p2i(CompressedOops::end()) :
                                        p2i((address)G1CollectedHeap::heap()->reserved().end()));
    }

    archive_subgraphs();
  }

  init_seen_objects_table();
  Universe::archive_exception_instances();
}

// Finish the scanning phase: archive interned strings and tear down the
// seen-objects table created by start_scanning_for_oops().
void HeapShared::end_scanning_for_oops() {
  archive_strings();
  delete_seen_objects_table();
}

// Write all discovered objects (plus the subgraph info table) into heap_info.
void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
  {
    NoSafepointVerifier nsv;
    if (!SkipArchiveHeapVerification) {
      CDSHeapVerifier::verify();
    }
    check_special_subgraph_classes();
  }

  StringTable::write_shared_table();
  // Resolve the pending root handles into plain oops for the writer.
  GrowableArrayCHeap<oop, mtClassShared>* roots = new GrowableArrayCHeap<oop, mtClassShared>(_pending_roots->length());
  for (int i = 0; i < _pending_roots->length(); i++) {
    roots->append(_pending_roots->at(i).resolve());
  }
  ArchiveHeapWriter::write(roots, heap_info);
  delete roots;

  ArchiveBuilder::OtherROAllocMark mark;
  write_subgraph_info_table();
}

// Archive the scratch mirror of orig_mirror (and the objects reachable from it).
void HeapShared::scan_java_mirror(oop orig_mirror) {
  oop m = scratch_java_mirror(orig_mirror);
  if (m != nullptr) { // nullptr if for custom class loader
    copy_java_mirror(orig_mirror, m);
    bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, m);
    assert(success, "sanity");

    // The copied reflection data (if any) must also be archived.
    oop extra;
    if ((extra = java_lang_Class::reflection_data(m)) != nullptr) {
      success = archive_reachable_objects_from(1, _dump_time_special_subgraph,
                                               extra);
      assert(success, "sanity");
    }
  }
}

// Archive the heap objects associated with a class: its mirror and, for instance
// classes, the archivable portion of its resolved_references array.
void HeapShared::scan_java_class(Klass* orig_k) {
  scan_java_mirror(orig_k->java_mirror());

  if (orig_k->is_instance_klass()) {
    InstanceKlass* orig_ik = InstanceKlass::cast(orig_k);
    orig_ik->constants()->prepare_resolved_references_for_archiving();
    objArrayOop rr = get_archived_resolved_references(orig_ik);
    if (rr != nullptr) {
      bool success = HeapShared::archive_reachable_objects_from(1, _dump_time_special_subgraph, rr);
      assert(success, "must be");
    }
  }
}

// Archive the object subgraphs rooted at the registered static fields; the
// full-module-graph fields are included only when FMG dumping is enabled.
void HeapShared::archive_subgraphs() {
  assert(CDSConfig::is_dumping_heap(), "must be");

  archive_object_subgraphs(archive_subgraph_entry_fields,
                           false /* is_full_module_graph */);

  if (CDSConfig::is_dumping_full_module_graph()) {
    archive_object_subgraphs(fmg_archive_subgraph_entry_fields,
                             true /* is_full_module_graph */);
    Modules::verify_archived_modules();
  }
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = nullptr;
HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_table;

// Get the subgraph_info for Klass k. A new subgraph_info is created if
// there is no existing one for k. The subgraph_info records the "buffered"
// address of the class.
KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  bool created;
  KlassSubGraphInfo* info =
    _dump_time_subgraph_info_table->put_if_absent(k, KlassSubGraphInfo(k, is_full_module_graph),
                                                  &created);
  assert(created, "must not initialize twice");
  return info;
}

// Look up the subgraph_info previously created for k by init_subgraph_info().
KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(k);
  assert(info != nullptr, "must have been initialized");
  return info;
}

// Add an entry field to the current KlassSubGraphInfo. Entry fields are recorded
// as (static_field_offset, root_index) pairs in a flat int array.
void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  if (_subgraph_entry_fields == nullptr) {
    _subgraph_entry_fields =
      new (mtClass) GrowableArray<int>(10, mtClass);
  }
  _subgraph_entry_fields->append(static_field_offset);
  _subgraph_entry_fields->append(HeapShared::append_root(v));
}

// Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
// Only objects of boot classes can be included in sub-graph.
void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");

  if (_subgraph_object_klasses == nullptr) {
    _subgraph_object_klasses =
      new (mtClass) GrowableArray<Klass*>(50, mtClass);
  }

  if (_k == orig_k) {
    // Don't add the Klass containing the sub-graph to its own klass
    // initialization list.
    return;
  }

  if (orig_k->is_instance_klass()) {
#ifdef ASSERT
    InstanceKlass* ik = InstanceKlass::cast(orig_k);
    if (CDSConfig::is_dumping_method_handles()) {
      // -XX:AOTInitTestClass must be used carefully in regression tests to
      // include only classes that are safe to aot-initialize.
      assert(ik->class_loader() == nullptr ||
             HeapShared::is_lambda_proxy_klass(ik) ||
             AOTClassInitializer::has_test_class(),
             "we can archive only instances of boot classes or lambda proxy classes");
    } else {
      assert(ik->class_loader() == nullptr, "must be boot class");
    }
#endif
    // vmClasses::xxx_klass() are not updated, need to check
    // the original Klass*
    if (orig_k == vmClasses::String_klass() ||
        orig_k == vmClasses::Object_klass()) {
      // Initialized early during VM initialization. No need to be added
      // to the sub-graph object class list.
      return;
    }
    check_allowed_klass(InstanceKlass::cast(orig_k));
  } else if (orig_k->is_objArray_klass()) {
    // For object arrays, the element (bottom) class is what must be checked.
    Klass* abk = ObjArrayKlass::cast(orig_k)->bottom_klass();
    if (abk->is_instance_klass()) {
      assert(InstanceKlass::cast(abk)->defined_by_boot_loader(),
             "must be boot class");
      check_allowed_klass(InstanceKlass::cast(ObjArrayKlass::cast(orig_k)->bottom_klass()));
    }
    if (orig_k == Universe::objectArrayKlass()) {
      // Initialized early during Universe::genesis. No need to be added
      // to the list.
      return;
    }
  } else {
    assert(orig_k->is_typeArray_klass(), "must be");
    // Primitive type arrays are created early during Universe::genesis.
    return;
  }

  if (log_is_enabled(Debug, aot, heap)) {
    // Log each klass only the first time it is added.
    if (!_subgraph_object_klasses->contains(orig_k)) {
      ResourceMark rm;
      log_debug(aot, heap)("Adding klass %s", orig_k->external_name());
    }
  }

  _subgraph_object_klasses->append_if_missing(orig_k);
  _has_non_early_klasses |= is_non_early_klass(orig_k);
}

// Verify that ik is permitted inside an archived heap subgraph; on violation,
// log an error and abort the dump. Allowed: named classes in java.base,
// (optionally) lambda proxy classes of builtin loaders, and test classes.
void KlassSubGraphInfo::check_allowed_klass(InstanceKlass* ik) {
#ifndef PRODUCT
  if (AOTClassInitializer::has_test_class()) {
    // The tests can cache arbitrary types of objects.
    return;
  }
#endif

  if (ik->module()->name() == vmSymbols::java_base()) {
    assert(ik->package() != nullptr, "classes in java.base cannot be in unnamed package");
    return;
  }

  const char* lambda_msg = "";
  if (CDSConfig::is_dumping_method_handles()) {
    lambda_msg = ", or a lambda proxy class";
    if (HeapShared::is_lambda_proxy_klass(ik) &&
        (ik->class_loader() == nullptr ||
         ik->class_loader() == SystemDictionary::java_platform_loader() ||
         ik->class_loader() == SystemDictionary::java_system_loader())) {
      return;
    }
  }

#ifndef PRODUCT
  if (!ik->module()->is_named() && ik->package() == nullptr && ArchiveHeapTestClass != nullptr) {
    // This class is loaded by ArchiveHeapTestClass
    return;
  }
  const char* testcls_msg = ", or a test class in an unnamed package of an unnamed module";
#else
  const char* testcls_msg = "";
#endif

  ResourceMark rm;
  log_error(aot, heap)("Class %s not allowed in archive heap. Must be in java.base%s%s",
                       ik->external_name(), lambda_msg, testcls_msg);
  AOTMetaspace::unrecoverable_writing_error();
}

// Returns true if k (for arrays: its bottom class) is an instance class that was
// NOT loaded during the JVMTI "early" phase.
bool KlassSubGraphInfo::is_non_early_klass(Klass* k) {
  if (k->is_objArray_klass()) {
    k = ObjArrayKlass::cast(k)->bottom_klass();
  }
  if (k->is_instance_klass()) {
    if (!SystemDictionaryShared::is_early_klass(InstanceKlass::cast(k))) {
      ResourceMark rm;
      log_info(aot, heap)("non-early: %s", k->external_name());
      return true;
    } else {
      return false;
    }
  } else {
    return false;
  }
}

// Initialize an archived subgraph_info_record from the given KlassSubGraphInfo.
void ArchivedKlassSubGraphInfoRecord::init(KlassSubGraphInfo* info) {
  _k = ArchiveBuilder::get_buffered_klass(info->klass());
  _entry_field_records = nullptr;
  _subgraph_object_klasses = nullptr;
  _is_full_module_graph = info->is_full_module_graph();

  if (_is_full_module_graph) {
    // Consider all classes referenced by the full module graph as early -- we will be
    // allocating objects of these classes during JVMTI early phase, so they cannot
    // be processed by (non-early) JVMTI ClassFileLoadHook
    _has_non_early_klasses = false;
  } else {
    _has_non_early_klasses = info->has_non_early_klasses();
  }

  if (_has_non_early_klasses) {
    ResourceMark rm;
    log_info(aot, heap)(
        "Subgraph of klass %s has non-early klasses and cannot be used when JVMTI ClassFileLoadHook is enabled",
        _k->external_name());
  }

  // populate the entry fields
  GrowableArray<int>* entry_fields = info->subgraph_entry_fields();
  if (entry_fields != nullptr) {
    int num_entry_fields = entry_fields->length();
    assert(num_entry_fields % 2 == 0, "sanity");
    _entry_field_records =
      ArchiveBuilder::new_ro_array<int>(num_entry_fields);
    for (int i = 0 ; i < num_entry_fields; i++) {
      _entry_field_records->at_put(i, entry_fields->at(i));
    }
  }

  // <recorded_klasses> has the Klasses of all the objects that are referenced by this subgraph.
  // Copy those that need to be explicitly initialized into <_subgraph_object_klasses>.
  GrowableArray<Klass*>* recorded_klasses = info->subgraph_object_klasses();
  if (recorded_klasses != nullptr) {
    // AOT-inited classes are automatically marked as "initialized" during bootstrap. When
    // programmatically loading a subgraph, we only need to explicitly initialize the classes
    // that are not aot-inited.
    // First pass: count how many entries the RO array needs.
    int num_to_copy = 0;
    for (int i = 0; i < recorded_klasses->length(); i++) {
      Klass* subgraph_k = ArchiveBuilder::get_buffered_klass(recorded_klasses->at(i));
      if (!subgraph_k->has_aot_initialized_mirror()) {
        num_to_copy ++;
      }
    }

    // Second pass: copy the non-aot-inited klasses and mark their pointers for relocation.
    _subgraph_object_klasses = ArchiveBuilder::new_ro_array<Klass*>(num_to_copy);
    bool is_special = (_k == ArchiveBuilder::get_buffered_klass(vmClasses::Object_klass()));
    for (int i = 0, n = 0; i < recorded_klasses->length(); i++) {
      Klass* subgraph_k = ArchiveBuilder::get_buffered_klass(recorded_klasses->at(i));
      if (subgraph_k->has_aot_initialized_mirror()) {
        continue;
      }
      if (log_is_enabled(Info, aot, heap)) {
        ResourceMark rm;
        const char* owner_name = is_special ? "<special>" : _k->external_name();
        if (subgraph_k->is_instance_klass()) {
          // NOTE(review): src_ik is computed but never used here — looks like dead
          // code left behind by an earlier refactoring; confirm and remove.
          InstanceKlass* src_ik = InstanceKlass::cast(ArchiveBuilder::current()->get_source_addr(subgraph_k));
        }
        log_info(aot, heap)(
          "Archived object klass %s (%2d) => %s",
          owner_name, n, subgraph_k->external_name());
      }
      _subgraph_object_klasses->at_put(n, subgraph_k);
      ArchivePtrMarker::mark_pointer(_subgraph_object_klasses->adr_at(n));
      n++;
    }
  }

  // All embedded pointers must be marked so the writer can relocate them.
  ArchivePtrMarker::mark_pointer(&_k);
  ArchivePtrMarker::mark_pointer(&_entry_field_records);
  ArchivePtrMarker::mark_pointer(&_subgraph_object_klasses);
}

// Closure that writes one ArchivedKlassSubGraphInfoRecord per dump-time
// KlassSubGraphInfo into the compact hashtable being built.
class HeapShared::CopyKlassSubGraphInfoToArchive : StackObj {
  CompactHashtableWriter* _writer;
public:
  CopyKlassSubGraphInfoToArchive(CompactHashtableWriter* writer) : _writer(writer) {}

  bool do_entry(Klass* klass, KlassSubGraphInfo& info) {
    // Only klasses that actually recorded something get a record.
    if (info.subgraph_object_klasses() != nullptr || info.subgraph_entry_fields() != nullptr) {
      ArchivedKlassSubGraphInfoRecord* record = HeapShared::archive_subgraph_info(&info);
      Klass* buffered_k = ArchiveBuilder::get_buffered_klass(klass);
      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)buffered_k);
      u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
      _writer->add(hash, delta);
    }
    return true; // keep on iterating
  }
};

// Allocate an ArchivedKlassSubGraphInfoRecord in the RO region and initialize it
// from info. Remembers the record for the special subgraph.
ArchivedKlassSubGraphInfoRecord* HeapShared::archive_subgraph_info(KlassSubGraphInfo* info) {
  ArchivedKlassSubGraphInfoRecord* record =
      (ArchivedKlassSubGraphInfoRecord*)ArchiveBuilder::ro_region_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
  record->init(info);
  if (info == _dump_time_special_subgraph) {
    _run_time_special_subgraph = record;
  }
  return record;
}

// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class
//   mirror. The entry
//   points are static fields in the mirror. For each entry point, the field
//   offset, and value are recorded in the sub-graph
//   info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before archived
//   java object sub-graph can be accessed at runtime.
void HeapShared::write_subgraph_info_table() {
  // Allocate the contents of the hashtable(s) inside the RO region of the CDS archive.
  DumpTimeKlassSubGraphInfoTable* d_table = _dump_time_subgraph_info_table;
  CompactHashtableStats stats;

  _run_time_subgraph_info_table.reset();

  CompactHashtableWriter writer(d_table->_count, &stats);
  CopyKlassSubGraphInfoToArchive copy(&writer);
  d_table->iterate(&copy);
  writer.dump(&_run_time_subgraph_info_table, "subgraphs");

#ifndef PRODUCT
  // Preserve the -XX:ArchiveHeapTestClass name in the archive so the runtime
  // can recognize the test class (see serialize_tables()).
  if (ArchiveHeapTestClass != nullptr) {
    size_t len = strlen(ArchiveHeapTestClass) + 1;
    Array<char>* array = ArchiveBuilder::new_ro_array<char>((int)len);
    strncpy(array->adr_at(0), ArchiveHeapTestClass, len);
    _archived_ArchiveHeapTestClass = array;
  }
#endif
  if (log_is_enabled(Info, aot, heap)) {
    print_stats();
  }
}

// Register one segment of the archived-heap roots array (runtime only).
void HeapShared::add_root_segment(objArrayOop segment_oop) {
  assert(segment_oop != nullptr, "must be");
  assert(ArchiveHeapLoader::is_in_use(), "must be");
  if (_root_segments == nullptr) {
    _root_segments = new GrowableArrayCHeap<OopHandle, mtClassShared>(10);
  }
  _root_segments->push(OopHandle(Universe::vm_global(), segment_oop));
}

// Record the maximum number of root elements per segment.
void HeapShared::init_root_segment_sizes(int max_size_elems) {
  _root_segment_max_size_elems = max_size_elems;
}

// (De)serialize the subgraph info table (and, in non-product builds, the
// archived test class name) to/from the archive.
void HeapShared::serialize_tables(SerializeClosure* soc) {

#ifndef PRODUCT
  soc->do_ptr(&_archived_ArchiveHeapTestClass);
  if (soc->reading() && _archived_ArchiveHeapTestClass != nullptr) {
    _test_class_name = _archived_ArchiveHeapTestClass->adr_at(0);
    setup_test_class(_test_class_name);
  }
#endif

  _run_time_subgraph_info_table.serialize_header(soc);
  soc->do_ptr(&_run_time_special_subgraph);
}

// Optionally verify (and, at level > 1, GC-stress) the heap around subgraph
// installation; controlled by -XX:VerifyArchivedFields.
static void verify_the_heap(Klass* k, const char* which) {
  if (VerifyArchivedFields > 0) {
    ResourceMark rm;
    log_info(aot, heap)("Verify heap %s initializing static field(s) in %s",
                        which, k->external_name());

    VM_Verify verify_op;
    VMThread::execute(&verify_op);

    if (VerifyArchivedFields > 1 && is_init_completed()) {
      // At this time, the oop->klass() of some archived objects in the heap may not
      // have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
      // have enough information (object size, oop maps, etc) so that a GC can be safely
      // performed.
      //
      // -XX:VerifyArchivedFields=2 force a GC to happen in such an early stage
      // to check for GC safety.
      log_info(aot, heap)("Trigger GC %s initializing static field(s) in %s",
                          which, k->external_name());
      FlagSetting fs1(VerifyBeforeGC, true);
      FlagSetting fs2(VerifyDuringGC, true);
      FlagSetting fs3(VerifyAfterGC, true);
      Universe::heap()->collect(GCCause::_java_lang_system_gc);
    }
  }
}

// Before GC can execute, we must ensure that all oops reachable from HeapShared::roots()
// have a valid klass. I.e., oopDesc::klass() must have already been resolved.
//
// Note: if a ArchivedKlassSubGraphInfoRecord contains non-early classes, and JVMTI
// ClassFileLoadHook is enabled, it's possible for this class to be dynamically replaced. In
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
// Resolve (load, but do not initialize) every class needed by the archived
// subgraphs, so that all archived oops have valid klasses before GC can run.
void HeapShared::resolve_classes(JavaThread* current) {
  assert(CDSConfig::is_using_archive(), "runtime only!");
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }

  if (!CDSConfig::is_using_aot_linked_classes()) {
    // Without aot-linked classes, the special subgraph's klasses must be
    // resolved explicitly here.
    assert( _run_time_special_subgraph != nullptr, "must be");
    Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
    if (klasses != nullptr) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        ExceptionMark em(current); // no exception can happen here
        resolve_or_init(k, /*do_init*/false, current);
      }
    }
  }

  resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
  resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}

// Resolve the subgraph classes for each entry in the (valid()-terminated)
// static field table.
void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name);
    assert(k != nullptr && k->defined_by_boot_loader(), "sanity");
    resolve_classes_for_subgraph_of(current, k);
  }
}

// Resolve the classes of k's subgraph; if the subgraph record cannot be used,
// clear its roots so the archived objects become unreachable.
void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
  }
  if (record == nullptr) {
    clear_archived_roots_of(k);
  }
}

// Eagerly initialize the java.lang.invoke holder classes that archived method
// handles depend on.
void HeapShared::initialize_java_lang_invoke(TRAPS) {
  if (CDSConfig::is_using_aot_linked_classes() || CDSConfig::is_dumping_method_handles()) {
    resolve_or_init("java/lang/invoke/Invokers$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/MethodHandle", true, CHECK);
    resolve_or_init("java/lang/invoke/MethodHandleNatives", true, CHECK);
    resolve_or_init("java/lang/invoke/DirectMethodHandle$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/DelegatingMethodHandle$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/LambdaForm$Holder", true, CHECK);
    resolve_or_init("java/lang/invoke/BoundMethodHandle$Species_L", true, CHECK);
  }
}

// Initialize the InstanceKlasses of objects that are reachable from the following roots:
//    - interned strings
//    - Klass::java_mirror() -- including aot-initialized mirrors such as those of Enum klasses.
//    - ConstantPool::resolved_references()
//    - Universe::<xxx>_exception_instance()
//
// For example, if this enum class is initialized at AOT cache assembly time:
//
//    enum Fruit {
//       APPLE, ORANGE, BANANA;
//       static final Set<Fruit> HAVE_SEEDS = new HashSet<>(Arrays.asList(APPLE, ORANGE));
//   }
//
// the aot-initialized mirror of Fruit has a static field that references HashSet, which
// should be initialized before any Java code can access the Fruit class. Note that
// HashSet itself doesn't necessary need to be an aot-initialized class.
void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
  if (!ArchiveHeapLoader::is_in_use()) {
    return;
  }

  assert( _run_time_special_subgraph != nullptr, "must be");
  Array<Klass*>* klasses = _run_time_special_subgraph->subgraph_object_klasses();
  if (klasses != nullptr) {
    // Two passes: link everything first (pass 0), then initialize (pass 1).
    for (int pass = 0; pass < 2; pass ++) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* k = klasses->at(i);
        if (k->class_loader_data() == nullptr) {
          // This class is not yet loaded. We will initialize it in a later phase.
          // For example, we have loaded only AOTLinkedClassCategory::BOOT1 classes
          // but k is part of AOTLinkedClassCategory::BOOT2.
          continue;
        }
        // Only handle classes belonging to the requested loader in this call.
        if (k->class_loader() == class_loader()) {
          if (pass == 0) {
            if (k->is_instance_klass()) {
              InstanceKlass::cast(k)->link_class(CHECK);
            }
          } else {
            resolve_or_init(k, /*do_init*/true, CHECK);
          }
        }
      }
    }
  }
}

// Install the archived subgraph rooted at k's static entry fields: initialize the
// required classes and store the archived values back into k's mirror. On failure
// the Java code simply won't see the archived objects.
void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
  JavaThread* THREAD = current;
  if (!ArchiveHeapLoader::is_in_use()) {
    return; // nothing to do
  }

  if (k->name()->equals("jdk/internal/module/ArchivedModuleGraph") &&
      !CDSConfig::is_using_optimized_module_handling() &&
      // archive was created with --module-path
      AOTClassLocationConfig::runtime()->num_module_paths() > 0) {
    // ArchivedModuleGraph was created with a --module-path that's different than the runtime --module-path.
    // Thus, it might contain references to modules that do not exist at runtime. We cannot use it.
    log_info(aot, heap)("Skip initializing ArchivedModuleGraph subgraph: is_using_optimized_module_handling=%s num_module_paths=%d",
                        BOOL_TO_STR(CDSConfig::is_using_optimized_module_handling()),
                        AOTClassLocationConfig::runtime()->num_module_paths());
    return;
  }

  ExceptionMark em(THREAD);
  const ArchivedKlassSubGraphInfoRecord* record =
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);

  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;
    // None of the field value will be set if there was an exception when initializing the classes.
    // The java code will not see any of the archived objects in the
    // subgraphs referenced from k in this case.
    return;
  }

  if (record != nullptr) {
    init_archived_fields_for(k, record);
  }
}

// Look up k's archived subgraph record and resolve (or, if do_init, initialize)
// every class it references. Returns nullptr when the record is missing or
// cannot be used (FMG disabled, non-early klasses + ClassFileLoadHook, or a
// referenced class is not in the AOT cache).
const ArchivedKlassSubGraphInfoRecord*
HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
  assert(!CDSConfig::is_dumping_heap(), "Should not be called when dumping heap");

  if (!k->in_aot_cache()) {
    return nullptr;
  }
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

#ifndef PRODUCT
  // Remember the record for the class named by -XX:ArchiveHeapTestClass.
  if (_test_class_name != nullptr && k->name()->equals(_test_class_name) && record != nullptr) {
    _test_class = k;
    _test_class_record = record;
  }
#endif

  // Initialize from archived data. Currently this is done only
  // during VM initialization time. No lock is needed.
  if (record == nullptr) {
    if (log_is_enabled(Info, aot, heap)) {
      ResourceMark rm(THREAD);
      log_info(aot, heap)("subgraph %s is not recorded",
                          k->external_name());
    }
    return nullptr;
  } else {
    if (record->is_full_module_graph() && !CDSConfig::is_using_full_module_graph()) {
      if (log_is_enabled(Info, aot, heap)) {
        ResourceMark rm(THREAD);
        log_info(aot, heap)("subgraph %s cannot be used because full module graph is disabled",
                            k->external_name());
      }
      return nullptr;
    }

    if (record->has_non_early_klasses() && JvmtiExport::should_post_class_file_load_hook()) {
      if (log_is_enabled(Info, aot, heap)) {
        ResourceMark rm(THREAD);
        log_info(aot, heap)("subgraph %s cannot be used because JVMTI ClassFileLoadHook is enabled",
                            k->external_name());
      }
      return nullptr;
    }

    if (log_is_enabled(Info, aot, heap)) {
      ResourceMark rm;
      log_info(aot, heap)("%s subgraph %s ", do_init ?
                                                       "init" : "resolve", k->external_name());
    }

    resolve_or_init(k, do_init, CHECK_NULL);

    // Load/link/initialize the klasses of the objects in the subgraph.
    // nullptr class loader is used.
    Array<Klass*>* klasses = record->subgraph_object_klasses();
    if (klasses != nullptr) {
      for (int i = 0; i < klasses->length(); i++) {
        Klass* klass = klasses->at(i);
        if (!klass->in_aot_cache()) {
          return nullptr;
        }
        resolve_or_init(klass, do_init, CHECK_NULL);
      }
    }
  }

  return record;
}

// Convenience overload: look up a builtin class by name and resolve/initialize it.
// Silently returns if the class is not found in the shared dictionary.
void HeapShared::resolve_or_init(const char* klass_name, bool do_init, TRAPS) {
  TempNewSymbol klass_name_sym = SymbolTable::new_symbol(klass_name);
  InstanceKlass* k = SystemDictionaryShared::find_builtin_class(klass_name_sym);
  if (k == nullptr) {
    return;
  }
  assert(k->defined_by_boot_loader(), "sanity");
  // Always resolve first; initialize only when requested.
  resolve_or_init(k, false, CHECK);
  if (do_init) {
    resolve_or_init(k, true, CHECK);
  }
}

// Resolve (do_init == false) or initialize (do_init == true) klass k.
// Initialization requires the class to have been resolved already.
void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
  if (!do_init) {
    if (k->class_loader_data() == nullptr) {
      Klass* resolved_k = SystemDictionary::resolve_or_null(k->name(), CHECK);
      assert(resolved_k == k, "classes used by archived heap must not be replaced by JVMTI ClassFileLoadHook");
    }
  } else {
    assert(k->class_loader_data() != nullptr, "must have been resolved by HeapShared::resolve_classes");
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->initialize(CHECK);
    } else if (k->is_objArray_klass()) {
      ObjArrayKlass* oak = ObjArrayKlass::cast(k);
      oak->initialize(CHECK);
    }
  }
}

// Store the archived root values recorded for k back into the static fields of
// k's mirror, making the archived subgraphs visible to Java code.
void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
  verify_the_heap(k, "before");

  // Load the subgraph entry fields from the record and store them back to
  // the corresponding fields within the mirror.
  oop m = k->java_mirror();
  Array<int>* entry_field_records = record->entry_field_records();
  if (entry_field_records != nullptr) {
    int efr_len = entry_field_records->length();
    assert(efr_len % 2 == 0, "sanity");
    // Records are (field_offset, root_index) pairs.
    for (int i = 0; i < efr_len; i += 2) {
      int field_offset = entry_field_records->at(i);
      int root_index = entry_field_records->at(i+1);
      oop v = get_root(root_index, /*clear=*/true);
      if (k->has_aot_initialized_mirror()) {
        // The field value was already installed when the mirror was aot-initialized.
        assert(v == m->obj_field(field_offset), "must be aot-initialized");
      } else {
        m->obj_field_put(field_offset, v);
      }
      log_debug(aot, heap)("  " PTR_FORMAT " init field @ %2d = " PTR_FORMAT, p2i(k), field_offset, p2i(v));
    }

    // Done. Java code can see the archived sub-graphs referenced from k's
    // mirror after this point.
    if (log_is_enabled(Info, aot, heap)) {
      ResourceMark rm;
      log_info(aot, heap)("initialize_from_archived_subgraph %s " PTR_FORMAT "%s%s",
                          k->external_name(), p2i(k), JvmtiExport::is_early_phase() ? " (early)" : "",
                          k->has_aot_initialized_mirror() ? " (aot-inited)" : "");
    }
  }

  verify_the_heap(k, "after ");
}

// Clear every root recorded for k's subgraph so the archived objects can be GC'ed.
// Used when the subgraph record cannot be installed.
void HeapShared::clear_archived_roots_of(Klass* k) {
  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
  const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
  if (record != nullptr) {
    Array<int>* entry_field_records = record->entry_field_records();
    if (entry_field_records != nullptr) {
      int efr_len = entry_field_records->length();
      assert(efr_len % 2 == 0, "sanity");
      for (int i = 0; i < efr_len; i += 2) {
        int root_index = entry_field_records->at(i+1);
        clear_root(root_index);
      }
    }
  }
}

// Push all oop fields (or oop array elements in case of an objArray) in
// _referencing_obj onto the _stack.
class HeapShared::OopFieldPusher: public BasicOopIterateClosure {
  PendingOopStack* _stack;             // destination for discovered oops
  GrowableArray<oop> _found_oop_fields; // buffer; flushed to _stack in the destructor
  int _level;                          // recursion depth of _referencing_obj
  bool _record_klasses_only;
  KlassSubGraphInfo* _subgraph_info;
  oop _referencing_obj;                // the object whose fields are being walked
  bool _is_java_lang_ref;              // true if _referencing_obj is a java.lang.ref.Reference
 public:
  OopFieldPusher(PendingOopStack* stack,
                 int level,
                 bool record_klasses_only,
                 KlassSubGraphInfo* subgraph_info,
                 oop orig) :
    _stack(stack),
    _found_oop_fields(),
    _level(level),
    _record_klasses_only(record_klasses_only),
    _subgraph_info(subgraph_info),
    _referencing_obj(orig) {
    _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(orig);
  }
  void do_oop(narrowOop *p) { OopFieldPusher::do_oop_work(p); }
  void do_oop(      oop *p) { OopFieldPusher::do_oop_work(p); }

  ~OopFieldPusher() {
    // Flush the buffered fields in reverse, so the stack pops them in field order.
    while (_found_oop_fields.length() > 0) {
      // This produces the exact same traversal order as the previous version
      // of OopFieldPusher that recurses on the C stack -- a depth-first search,
      // walking the oop fields in _referencing_obj by ascending field offsets.
      oop obj = _found_oop_fields.pop();
      _stack->push(PendingOop(obj, _referencing_obj, _level + 1));
    }
  }

 protected:
  template <class T> void do_oop_work(T *p) {
    int field_offset = pointer_delta_as_int((char*)p, cast_from_oop<char*>(_referencing_obj));
    oop obj = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(_referencing_obj, field_offset);
    if (!CompressedOops::is_null(obj)) {
      if (_is_java_lang_ref && AOTReferenceObjSupport::skip_field(field_offset)) {
        // Do not follow these fields. They will be cleared to null.
        return;
      }

      if (!_record_klasses_only && log_is_enabled(Debug, aot, heap)) {
        ResourceMark rm;
        log_debug(aot, heap)("(%d) %s[%d] ==> " PTR_FORMAT " size %zu %s", _level,
                             _referencing_obj->klass()->external_name(), field_offset,
                             p2i(obj), obj->size() * HeapWordSize, obj->klass()->external_name());
        if (log_is_enabled(Trace, aot, heap)) {
          LogTarget(Trace, aot, heap) log;
          LogStream out(log);
          obj->print_on(&out);
        }
      }

      _found_oop_fields.push(obj);
    }
  }

 public:
  oop referencing_obj() { return _referencing_obj; }
  KlassSubGraphInfo* subgraph_info() { return _subgraph_info; }
};

// Checks if an oop has any non-null oop fields
class PointsToOopsChecker : public BasicOopIterateClosure {
  bool _result;

  template <class T> void check(T *p) {
    _result |= (HeapAccess<>::oop_load(p) != nullptr);
  }

public:
  PointsToOopsChecker() : _result(false) {}
  void do_oop(narrowOop *p) { check(p); }
  void do_oop(      oop *p) { check(p); }
  bool result() { return _result; }
};

// Build the CachedOopInfo for obj: remembers its referrer and whether it has
// any non-null oop fields.
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) {
  PointsToOopsChecker points_to_oops_checker;
  obj->oop_iterate(&points_to_oops_checker);
  return CachedOopInfo(OopHandle(Universe::vm_global(), referrer), points_to_oops_checker.result());
}

// Eagerly initialize the primitive box classes when the archived heap is in use.
void HeapShared::init_box_classes(TRAPS) {
  if (ArchiveHeapLoader::is_in_use()) {
    vmClasses::Boolean_klass()->initialize(CHECK);
    vmClasses::Character_klass()->initialize(CHECK);
    vmClasses::Float_klass()->initialize(CHECK);
    vmClasses::Double_klass()->initialize(CHECK);
    vmClasses::Byte_klass()->initialize(CHECK);
    vmClasses::Short_klass()->initialize(CHECK);
    vmClasses::Integer_klass()->initialize(CHECK);
    vmClasses::Long_klass()->initialize(CHECK);
    vmClasses::Void_klass()->initialize(CHECK);
  }
1730 } 1731 1732 void HeapShared::exit_on_error() { 1733 if (_context != nullptr) { 1734 ResourceMark rm; 1735 LogStream ls(Log(cds, heap)::error()); 1736 ls.print_cr("Context"); 1737 for (int i = 0; i < _context->length(); i++) { 1738 const char* s = _context->at(i); 1739 ls.print_cr("- %s", s); 1740 } 1741 } 1742 debug_trace(); 1743 AOTMetaspace::unrecoverable_writing_error(); 1744 } 1745 1746 // (1) If orig_obj has not been archived yet, archive it. 1747 // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called), 1748 // trace all objects that are reachable from it, and make sure these objects are archived. 1749 // (3) Record the klasses of all objects that are reachable from orig_obj (including those that 1750 // were already archived when this function is called) 1751 bool HeapShared::archive_reachable_objects_from(int level, 1752 KlassSubGraphInfo* subgraph_info, 1753 oop orig_obj) { 1754 assert(orig_obj != nullptr, "must be"); 1755 PendingOopStack stack; 1756 stack.push(PendingOop(orig_obj, nullptr, level)); 1757 1758 while (stack.length() > 0) { 1759 PendingOop po = stack.pop(); 1760 _object_being_archived = po; 1761 bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer()); 1762 _object_being_archived = PendingOop(); 1763 1764 if (!status) { 1765 // Don't archive a subgraph root that's too big. For archives static fields, that's OK 1766 // as the Java code will take care of initializing this field dynamically. 
1767 assert(level == 1, "VM should have exited with unarchivable objects for _level > 1"); 1768 return false; 1769 } 1770 } 1771 1772 return true; 1773 } 1774 1775 bool HeapShared::walk_one_object(PendingOopStack* stack, int level, KlassSubGraphInfo* subgraph_info, 1776 oop orig_obj, oop referrer) { 1777 assert(orig_obj != nullptr, "must be"); 1778 if (!JavaClasses::is_supported_for_archiving(orig_obj)) { 1779 // This object has injected fields that cannot be supported easily, so we disallow them for now. 1780 // If you get an error here, you probably made a change in the JDK library that has added 1781 // these objects that are referenced (directly or indirectly) by static fields. 1782 ResourceMark rm; 1783 log_error(aot, heap)("Cannot archive object " PTR_FORMAT " of class %s", p2i(orig_obj), orig_obj->klass()->external_name()); 1784 debug_trace(); 1785 AOTMetaspace::unrecoverable_writing_error(); 1786 } 1787 1788 if (log_is_enabled(Debug, aot, heap) && java_lang_Class::is_instance(orig_obj)) { 1789 ResourceMark rm; 1790 LogTarget(Debug, aot, heap) log; 1791 LogStream out(log); 1792 out.print("Found java mirror " PTR_FORMAT " ", p2i(orig_obj)); 1793 Klass* k = java_lang_Class::as_Klass(orig_obj); 1794 if (k != nullptr) { 1795 out.print("%s", k->external_name()); 1796 } else { 1797 out.print("primitive"); 1798 } 1799 out.print_cr("; scratch mirror = " PTR_FORMAT, 1800 p2i(scratch_java_mirror(orig_obj))); 1801 } 1802 1803 if (java_lang_Class::is_instance(orig_obj)) { 1804 Klass* k = java_lang_Class::as_Klass(orig_obj); 1805 if (RegeneratedClasses::has_been_regenerated(k)) { 1806 orig_obj = RegeneratedClasses::get_regenerated_object(k)->java_mirror(); 1807 } 1808 } 1809 1810 if (CDSConfig::is_initing_classes_at_dump_time()) { 1811 if (java_lang_Class::is_instance(orig_obj)) { 1812 orig_obj = scratch_java_mirror(orig_obj); 1813 assert(orig_obj != nullptr, "must be archived"); 1814 } 1815 } else if (java_lang_Class::is_instance(orig_obj) && subgraph_info != 
_dump_time_special_subgraph) { 1816 // Without CDSConfig::is_initing_classes_at_dump_time(), we only allow archived objects to 1817 // point to the mirrors of (1) j.l.Object, (2) primitive classes, and (3) box classes. These are initialized 1818 // very early by HeapShared::init_box_classes(). 1819 if (orig_obj == vmClasses::Object_klass()->java_mirror() 1820 || java_lang_Class::is_primitive(orig_obj) 1821 || orig_obj == vmClasses::Boolean_klass()->java_mirror() 1822 || orig_obj == vmClasses::Character_klass()->java_mirror() 1823 || orig_obj == vmClasses::Float_klass()->java_mirror() 1824 || orig_obj == vmClasses::Double_klass()->java_mirror() 1825 || orig_obj == vmClasses::Byte_klass()->java_mirror() 1826 || orig_obj == vmClasses::Short_klass()->java_mirror() 1827 || orig_obj == vmClasses::Integer_klass()->java_mirror() 1828 || orig_obj == vmClasses::Long_klass()->java_mirror() 1829 || orig_obj == vmClasses::Void_klass()->java_mirror()) { 1830 orig_obj = scratch_java_mirror(orig_obj); 1831 assert(orig_obj != nullptr, "must be archived"); 1832 } else { 1833 // If you get an error here, you probably made a change in the JDK library that has added a Class 1834 // object that is referenced (directly or indirectly) by an ArchivableStaticFieldInfo 1835 // defined at the top of this file. 1836 log_error(aot, heap)("(%d) Unknown java.lang.Class object is in the archived sub-graph", level); 1837 debug_trace(); 1838 AOTMetaspace::unrecoverable_writing_error(); 1839 } 1840 } 1841 1842 if (has_been_seen_during_subgraph_recording(orig_obj)) { 1843 // orig_obj has already been archived and traced. Nothing more to do. 
1844 return true; 1845 } else { 1846 set_has_been_seen_during_subgraph_recording(orig_obj); 1847 } 1848 1849 bool already_archived = has_been_archived(orig_obj); 1850 bool record_klasses_only = already_archived; 1851 if (!already_archived) { 1852 ++_num_new_archived_objs; 1853 if (!archive_object(orig_obj, referrer, subgraph_info)) { 1854 // Skip archiving the sub-graph referenced from the current entry field. 1855 ResourceMark rm; 1856 log_error(aot, heap)( 1857 "Cannot archive the sub-graph referenced from %s object (" 1858 PTR_FORMAT ") size %zu, skipped.", 1859 orig_obj->klass()->external_name(), p2i(orig_obj), orig_obj->size() * HeapWordSize); 1860 if (level == 1) { 1861 // Don't archive a subgraph root that's too big. For archives static fields, that's OK 1862 // as the Java code will take care of initializing this field dynamically. 1863 return false; 1864 } else { 1865 // We don't know how to handle an object that has been archived, but some of its reachable 1866 // objects cannot be archived. Bail out for now. We might need to fix this in the future if 1867 // we have a real use case. 1868 AOTMetaspace::unrecoverable_writing_error(); 1869 } 1870 } 1871 } 1872 1873 Klass *orig_k = orig_obj->klass(); 1874 subgraph_info->add_subgraph_object_klass(orig_k); 1875 1876 { 1877 // Find all the oops that are referenced by orig_obj, push them onto the stack 1878 // so we can work on them next. 1879 ResourceMark rm; 1880 OopFieldPusher pusher(stack, level, record_klasses_only, subgraph_info, orig_obj); 1881 orig_obj->oop_iterate(&pusher); 1882 } 1883 1884 if (CDSConfig::is_initing_classes_at_dump_time()) { 1885 // The enum klasses are archived with aot-initialized mirror. 1886 // See AOTClassInitializer::can_archive_initialized_mirror(). 
1887 } else { 1888 if (CDSEnumKlass::is_enum_obj(orig_obj)) { 1889 CDSEnumKlass::handle_enum_obj(level + 1, subgraph_info, orig_obj); 1890 } 1891 } 1892 1893 return true; 1894 } 1895 1896 // 1897 // Start from the given static field in a java mirror and archive the 1898 // complete sub-graph of java heap objects that are reached directly 1899 // or indirectly from the starting object by following references. 1900 // Sub-graph archiving restrictions (current): 1901 // 1902 // - All classes of objects in the archived sub-graph (including the 1903 // entry class) must be boot class only. 1904 // - No java.lang.Class instance (java mirror) can be included inside 1905 // an archived sub-graph. Mirror can only be the sub-graph entry object. 1906 // 1907 // The Java heap object sub-graph archiving process (see OopFieldPusher): 1908 // 1909 // 1) Java object sub-graph archiving starts from a given static field 1910 // within a Class instance (java mirror). If the static field is a 1911 // reference field and points to a non-null java object, proceed to 1912 // the next step. 1913 // 1914 // 2) Archives the referenced java object. If an archived copy of the 1915 // current object already exists, updates the pointer in the archived 1916 // copy of the referencing object to point to the current archived object. 1917 // Otherwise, proceed to the next step. 1918 // 1919 // 3) Follows all references within the current java object and recursively 1920 // archive the sub-graph of objects starting from each reference. 1921 // 1922 // 4) Updates the pointer in the archived copy of referencing object to 1923 // point to the current archived object. 1924 // 1925 // 5) The Klass of the current java object is added to the list of Klasses 1926 // for loading and initializing before any object in the archived graph can 1927 // be accessed at runtime. 
//
// Archive the sub-graph reachable from one static field of klass k, and record
// the field as a subgraph entry point so it can be restored at runtime.
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
                                                             const char* klass_name,
                                                             int field_offset,
                                                             const char* field_name) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  assert(k->defined_by_boot_loader(), "must be boot class");

  oop m = k->java_mirror();

  KlassSubGraphInfo* subgraph_info = get_subgraph_info(k);
  oop f = m->obj_field(field_offset);

  log_debug(aot, heap)("Start archiving from: %s::%s (" PTR_FORMAT ")", klass_name, field_name, p2i(f));

  if (!CompressedOops::is_null(f)) {
    if (log_is_enabled(Trace, aot, heap)) {
      LogTarget(Trace, aot, heap) log;
      LogStream out(log);
      f->print_on(&out);
    }

    bool success = archive_reachable_objects_from(1, subgraph_info, f);
    if (!success) {
      log_error(aot, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
                           klass_name, field_name);
    } else {
      // Note: the field value is not preserved in the archived mirror.
      // Record the field as a new subGraph entry point. The recorded
      // information is restored from the archive at runtime.
      subgraph_info->add_subgraph_entry_field(field_offset, f);
      log_info(aot, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(f));
    }
  } else {
    // The field contains null, we still need to record the entry point,
    // so it can be restored at runtime.
    subgraph_info->add_subgraph_entry_field(field_offset, nullptr);
  }
}

#ifndef PRODUCT
// Recursively verifies (via HeapShared::verify_reachable_objects_from) that
// every non-null oop reachable through the visited object has been archived.
class VerifySharedOopClosure: public BasicOopIterateClosure {
 public:
  void do_oop(narrowOop *p) { VerifySharedOopClosure::do_oop_work(p); }
  void do_oop(      oop *p) { VerifySharedOopClosure::do_oop_work(p); }

 protected:
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
      HeapShared::verify_reachable_objects_from(obj);
    }
  }
};

// Verify the sub-graph rooted at one static field of k (skipped if the field
// is null).
void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_offset) {
  assert(CDSConfig::is_dumping_heap(), "dump time only");
  assert(k->defined_by_boot_loader(), "must be boot class");

  oop m = k->java_mirror();
  oop f = m->obj_field(field_offset);
  if (!CompressedOops::is_null(f)) {
    verify_subgraph_from(f);
  }
}

// Verify that every object reachable from orig_obj has been archived, using a
// fresh seen-objects table to terminate on cycles.
void HeapShared::verify_subgraph_from(oop orig_obj) {
  if (!has_been_archived(orig_obj)) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().
    return;
  }

  // Verify that all objects reachable from orig_obj are archived.
  init_seen_objects_table();
  verify_reachable_objects_from(orig_obj);
  delete_seen_objects_table();
}

// One step of the verification walk: check that obj (after substituting
// regenerated/scratch mirrors, matching what the archiving walk does) has been
// archived, then recurse into its oop fields via VerifySharedOopClosure.
void HeapShared::verify_reachable_objects_from(oop obj) {
  _num_total_verifications ++;
  if (java_lang_Class::is_instance(obj)) {
    Klass* k = java_lang_Class::as_Klass(obj);
    if (RegeneratedClasses::has_been_regenerated(k)) {
      k = RegeneratedClasses::get_regenerated_object(k);
      obj = k->java_mirror();
    }
    obj = scratch_java_mirror(obj);
    assert(obj != nullptr, "must be");
  }
  if (!has_been_seen_during_subgraph_recording(obj)) {
    set_has_been_seen_during_subgraph_recording(obj);
    assert(has_been_archived(obj), "must be");
    VerifySharedOopClosure walker;
    obj->oop_iterate(&walker);
  }
}
#endif

// Sanity-check the klasses recorded in the special subgraph: when classes are
// not aot-initialized at dump time, only a small fixed set of types may occur.
void HeapShared::check_special_subgraph_classes() {
  if (CDSConfig::is_initing_classes_at_dump_time()) {
    // We can have aot-initialized classes (such as Enums) that can reference objects
    // of arbitrary types. Currently, we trust the JEP 483 implementation to only
    // aot-initialize classes that are "safe".
    //
    // TODO: we need an automatic tool that checks the safety of aot-initialized
    // classes (when we extend the set of aot-initialized classes beyond JEP 483)
    return;
  } else {
    // In this case, the special subgraph should contain a few specific types
    GrowableArray<Klass*>* klasses = _dump_time_special_subgraph->subgraph_object_klasses();
    int num = klasses->length();
    for (int i = 0; i < num; i++) {
      Klass* subgraph_k = klasses->at(i);
      Symbol* name = subgraph_k->name();
      if (subgraph_k->is_instance_klass() &&
          name != vmSymbols::java_lang_Class() &&
          name != vmSymbols::java_lang_String() &&
          name != vmSymbols::java_lang_ArithmeticException() &&
          name != vmSymbols::java_lang_ArrayIndexOutOfBoundsException() &&
          name != vmSymbols::java_lang_ArrayStoreException() &&
          name != vmSymbols::java_lang_ClassCastException() &&
          name != vmSymbols::java_lang_InternalError() &&
          name != vmSymbols::java_lang_NullPointerException()) {
        ResourceMark rm;
        fatal("special subgraph cannot have objects of type %s", subgraph_k->external_name());
      }
    }
  }
}

// Dump-time bookkeeping state: the seen-objects table for the current
// recording pass, the object currently being walked (for error traces), and
// per-pass / cumulative statistics counters.
HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr;
HeapShared::PendingOop HeapShared::_object_being_archived;
int HeapShared::_num_new_walked_objs;
int HeapShared::_num_new_archived_objs;
int HeapShared::_num_old_recorded_klasses;

int HeapShared::_num_total_subgraph_recordings = 0;
int HeapShared::_num_total_walked_objs = 0;
int HeapShared::_num_total_archived_objs = 0;
int HeapShared::_num_total_recorded_klasses = 0;
int HeapShared::_num_total_verifications = 0;

// Returns true if obj was already visited in the current recording pass.
bool HeapShared::has_been_seen_during_subgraph_recording(oop obj) {
  return _seen_objects_table->get(obj) != nullptr;
}

// Marks obj as visited in the current recording pass. Must not be called twice
// for the same object within one pass.
void HeapShared::set_has_been_seen_during_subgraph_recording(oop obj) {
  assert(!has_been_seen_during_subgraph_recording(obj), "sanity");
  _seen_objects_table->put_when_absent(obj, true);
  _seen_objects_table->maybe_grow();
  ++ _num_new_walked_objs;
}

// Begin a recording pass for the archived fields of k: set up the subgraph
// info and a fresh seen-objects table, and reset the per-pass counters.
void HeapShared::start_recording_subgraph(InstanceKlass *k, const char* class_name, bool is_full_module_graph) {
  log_info(aot, heap)("Start recording subgraph(s) for archived fields in %s", class_name);
  init_subgraph_info(k, is_full_module_graph);
  init_seen_objects_table();
  _num_new_walked_objs = 0;
  _num_new_archived_objs = 0;
  _num_old_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses();
}

// Finish the recording pass started by start_recording_subgraph(): log the
// per-pass statistics, tear down the seen-objects table, and fold the pass
// totals into the cumulative counters.
void HeapShared::done_recording_subgraph(InstanceKlass *k, const char* class_name) {
  int num_new_recorded_klasses = get_subgraph_info(k)->num_subgraph_object_klasses() -
    _num_old_recorded_klasses;
  log_info(aot, heap)("Done recording subgraph(s) for archived fields in %s: "
                      "walked %d objs, archived %d new objs, recorded %d classes",
                      class_name, _num_new_walked_objs, _num_new_archived_objs,
                      num_new_recorded_klasses);

  delete_seen_objects_table();

  _num_total_subgraph_recordings ++;
  _num_total_walked_objs      += _num_new_walked_objs;
  _num_total_archived_objs    += _num_new_archived_objs;
  _num_total_recorded_klasses += num_new_recorded_klasses;
}

// Locates a static reference-typed field by name in an InstanceKlass and
// remembers its offset. Used by init_subgraph_entry_fields() below.
class ArchivableStaticFieldFinder: public FieldClosure {
  InstanceKlass* _ik;
  Symbol* _field_name;
  bool _found;
  int _offset;
 public:
  ArchivableStaticFieldFinder(InstanceKlass* ik, Symbol* field_name) :
    _ik(ik), _field_name(field_name), _found(false), _offset(-1) {}

  virtual void do_field(fieldDescriptor* fd) {
    if (fd->name() == _field_name) {
      assert(!_found, "fields can never be overloaded");
      if (is_reference_type(fd->field_type())) {
        // Only reference-typed fields qualify as subgraph entry points.
        _found = true;
        _offset = fd->offset();
      }
    }
  }
  bool found() { return _found; }
  int offset() { return _offset; }
};

// For each entry in fields[] (terminated by an invalid slot): resolve and
// initialize the named class, validate the archiving restrictions (boot
// loader, java.base module -- or, for the test class, unnamed module/package),
// and fill in the klass and field offset. Throws IllegalArgumentException on
// any violation.
void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
                                            TRAPS) {
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* info = &fields[i];
    TempNewSymbol klass_name = SymbolTable::new_symbol(info->klass_name);
    TempNewSymbol field_name = SymbolTable::new_symbol(info->field_name);
    ResourceMark rm; // for stringStream::as_string() etc.

#ifndef PRODUCT
    bool is_test_class = (ArchiveHeapTestClass != nullptr) && (strcmp(info->klass_name, ArchiveHeapTestClass) == 0);
    const char* test_class_name = ArchiveHeapTestClass;
#else
    bool is_test_class = false;
    const char* test_class_name = ""; // avoid C++ printf checks warnings.
#endif

    if (is_test_class) {
      log_warning(aot)("Loading ArchiveHeapTestClass %s ...", test_class_name);
    }

    Klass* k = SystemDictionary::resolve_or_fail(klass_name, true, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      CLEAR_PENDING_EXCEPTION;
      stringStream st;
      st.print("Fail to initialize archive heap: %s cannot be loaded by the boot loader", info->klass_name);
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
    }

    if (!k->is_instance_klass()) {
      stringStream st;
      st.print("Fail to initialize archive heap: %s is not an instance class", info->klass_name);
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
    }

    InstanceKlass* ik = InstanceKlass::cast(k);
    assert(InstanceKlass::cast(ik)->defined_by_boot_loader(),
           "Only support boot classes");

    if (is_test_class) {
      if (ik->module()->is_named()) {
        // We don't want ArchiveHeapTestClass to be abused to easily load/initialize arbitrary
        // core-lib classes. You need to at least append to the bootclasspath.
        stringStream st;
        st.print("ArchiveHeapTestClass %s is not in unnamed module", test_class_name);
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
      }

      if (ik->package() != nullptr) {
        // This restriction makes HeapShared::is_a_test_class_in_unnamed_module() easy.
        stringStream st;
        st.print("ArchiveHeapTestClass %s is not in unnamed package", test_class_name);
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
      }
    } else {
      if (ik->module()->name() != vmSymbols::java_base()) {
        // We don't want to deal with cases when a module is unavailable at runtime.
        // FUTURE -- load from archived heap only when module graph has not changed
        //          between dump and runtime.
        stringStream st;
        st.print("%s is not in java.base module", info->klass_name);
        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
      }
    }

    if (is_test_class) {
      log_warning(aot)("Initializing ArchiveHeapTestClass %s ...", test_class_name);
    }
    ik->initialize(CHECK);

    ArchivableStaticFieldFinder finder(ik, field_name);
    ik->do_local_static_fields(&finder);
    if (!finder.found()) {
      stringStream st;
      st.print("Unable to find the static T_OBJECT field %s::%s", info->klass_name, info->field_name);
      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), st.as_string());
    }

    info->klass = ik;
    info->offset = finder.offset();
  }
}

// Set up all subgraph entry field tables for a heap dump; the full-module-graph
// table is only processed when full-module-graph dumping is enabled.
void HeapShared::init_subgraph_entry_fields(TRAPS) {
  assert(CDSConfig::is_dumping_heap(), "must be");
  _dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
  init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK);
  if (CDSConfig::is_dumping_full_module_graph()) {
    init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK);
  }
}

#ifndef PRODUCT
// Patch the reserved slot in archive_subgraph_entry_fields[] with the class
// named by -XX:ArchiveHeapTestClass (if any); its test field becomes an
// additional subgraph entry point.
void HeapShared::setup_test_class(const char* test_class_name) {
  ArchivableStaticFieldInfo* p = archive_subgraph_entry_fields;
  int num_slots = sizeof(archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
  assert(p[num_slots - 2].klass_name == nullptr, "must have empty slot that's patched below");
  assert(p[num_slots - 1].klass_name == nullptr, "must have empty slot that marks the end of the list");

  if (test_class_name != nullptr) {
    p[num_slots - 2].klass_name = test_class_name;
    p[num_slots - 2].field_name = ARCHIVE_TEST_FIELD_NAME;
  }
}

// See if ik is one of the test classes that are pulled in by -XX:ArchiveHeapTestClass
// during runtime. This may be called before the module system is initialized so
// we cannot rely on InstanceKlass::module(), etc.
bool HeapShared::is_a_test_class_in_unnamed_module(Klass* ik) {
  if (_test_class != nullptr) {
    if (ik == _test_class) {
      return true;
    }
    Array<Klass*>* klasses = _test_class_record->subgraph_object_klasses();
    if (klasses == nullptr) {
      return false;
    }

    for (int i = 0; i < klasses->length(); i++) {
      Klass* k = klasses->at(i);
      if (k == ik) {
        Symbol* name;
        if (k->is_instance_klass()) {
          name = InstanceKlass::cast(k)->name();
        } else if (k->is_objArray_klass()) {
          // For object arrays, classify by the element's bottom class.
          Klass* bk = ObjArrayKlass::cast(k)->bottom_klass();
          if (!bk->is_instance_klass()) {
            return false;
          }
          name = bk->name();
        } else {
          return false;
        }

        // See KlassSubGraphInfo::check_allowed_klass() - we only allow test classes
        // to be:
        //   (A) java.base classes (which must not be in the unnamed module)
        //   (B) test classes which must be in the unnamed package of the unnamed module.
        // So if we see a '/' character in the class name, it must be in (A);
        // otherwise it must be in (B).
        if (name->index_of_at(0, "/", 1) >= 0) {
          return false; // (A)
        }

        return true; // (B)
      }
    }
  }

  return false;
}

// Restore the archived subgraph of the -XX:ArchiveHeapTestClass test class:
// resolve its subgraph classes, install its archived fields, then initialize.
void HeapShared::initialize_test_class_from_archive(JavaThread* current) {
  Klass* k = _test_class;
  if (k != nullptr && ArchiveHeapLoader::is_in_use()) {
    JavaThread* THREAD = current;
    ExceptionMark em(THREAD);
    const ArchivedKlassSubGraphInfoRecord* record =
      resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/false, THREAD);

    // The _test_class is in the unnamed module, so it can't call CDS.initializeFromArchive()
    // from its <clinit> method. So we set up its "archivedObjects" field first, before
    // calling its <clinit>. This is not strictly clean, but it's a convenient way to write unit
    // test cases (see test/hotspot/jtreg/runtime/cds/appcds/cacheObject/ArchiveHeapTestClass.java).
    if (record != nullptr) {
      init_archived_fields_for(k, record);
    }
    resolve_or_init_classes_for_subgraph_of(k, /*do_init=*/true, THREAD);
  }
}
#endif

// One-time dump-time setup: register the test class (if any), allocate the
// interned-strings table, and initialize the subgraph entry field tables.
void HeapShared::init_for_dumping(TRAPS) {
  if (CDSConfig::is_dumping_heap()) {
    setup_test_class(ArchiveHeapTestClass);
    _dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
    init_subgraph_entry_fields(CHECK);
  }
}

// Archive the sub-graphs of all entry fields listed in fields[], grouping
// consecutive fields of the same class into a single recording pass, then log
// (and in non-product builds verify) the results.
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
                                          bool is_full_module_graph) {
  _num_total_subgraph_recordings = 0;
  _num_total_walked_objs = 0;
  _num_total_archived_objs = 0;
  _num_total_recorded_klasses = 0;
  _num_total_verifications = 0;

  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the class of the objects that can be reached
  //     by any of these static fields.
  //     At runtime, these classes are initialized before X's archived fields
  //     are restored by HeapShared::initialize_from_archived_subgraph().
  for (int i = 0; fields[i].valid(); ) {
    ArchivableStaticFieldInfo* info = &fields[i];
    const char* klass_name = info->klass_name;
    start_recording_subgraph(info->klass, klass_name, is_full_module_graph);

    ContextMark cm(klass_name);
    // If you have specified consecutive fields of the same klass in
    // fields[], these will be archived in the same
    // {start_recording_subgraph ... done_recording_subgraph} pass to
    // save time.
    for (; fields[i].valid(); i++) {
      ArchivableStaticFieldInfo* f = &fields[i];
      if (f->klass_name != klass_name) {
        break;
      }

      ContextMark cm(f->field_name);
      archive_reachable_objects_from_static_field(f->klass, f->klass_name,
                                                  f->offset, f->field_name);
    }
    done_recording_subgraph(info->klass, klass_name);
  }

  log_info(aot, heap)("Archived subgraph records = %d",
                      _num_total_subgraph_recordings);
  log_info(aot, heap)(" Walked %d objects", _num_total_walked_objs);
  log_info(aot, heap)(" Archived %d objects", _num_total_archived_objs);
  log_info(aot, heap)(" Recorded %d klasses", _num_total_recorded_klasses);

#ifndef PRODUCT
  for (int i = 0; fields[i].valid(); i++) {
    ArchivableStaticFieldInfo* f = &fields[i];
    verify_subgraph_from_static_field(f->klass, f->offset);
  }
  log_info(aot, heap)(" Verified %d references", _num_total_verifications);
#endif
}

// Keep track of the contents of the archived interned string table. This table
// is used only by CDSHeapVerifier.
// Record one interned string that has been archived, and pin its value array
// by forbidding string deduplication from replacing it.
void HeapShared::add_to_dumped_interned_strings(oop string) {
  assert_at_safepoint(); // DumpedInternedStrings uses raw oops
  assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
  bool created;
  _dumped_interned_strings->put_if_absent(string, true, &created);
  if (created) {
    // Prevent string deduplication from changing the value field to
    // something not in the archive.
    java_lang_String::set_deduplication_forbidden(string);
    _dumped_interned_strings->maybe_grow();
  }
}

// Returns true if o was recorded by add_to_dumped_interned_strings().
bool HeapShared::is_dumped_interned_string(oop o) {
  return _dumped_interned_strings->get(o) != nullptr;
}

// These tables should be used only within the CDS safepoint, so
// delete them before we exit the safepoint. Otherwise the table will
// contain bad oops after a GC.
void HeapShared::delete_tables_with_raw_oops() {
  // The seen-objects table is created/deleted per recording pass and must
  // already be gone by now.
  assert(_seen_objects_table == nullptr, "should have been deleted");

  delete _dumped_interned_strings;
  _dumped_interned_strings = nullptr;

  ArchiveHeapWriter::delete_tables_with_raw_oops();
}

// Print a reference trace from the object currently being archived (if any)
// back to its subgraph root, to help diagnose archiving failures.
void HeapShared::debug_trace() {
  ResourceMark rm;
  oop referrer = _object_being_archived.referrer();
  if (referrer != nullptr) {
    LogStream ls(Log(aot, heap)::error());
    ls.print_cr("Reference trace");
    CDSHeapVerifier::trace_to_root(&ls, referrer);
  }
}

#ifndef PRODUCT
// At dump-time, find the location of all the non-null oop pointers in an archived heap
// region. This way we can quickly relocate all the pointers without using
// BasicOopIterateClosure at runtime.
class FindEmbeddedNonNullPointers: public BasicOopIterateClosure {
  void* _start;          // base address the oopmap bit indices are relative to
  BitMap *_oopmap;       // one bit per (narrow) oop slot; set iff slot is non-null
  int _num_total_oops;
  int _num_null_oops;
 public:
  FindEmbeddedNonNullPointers(void* start, BitMap* oopmap)
    : _start(start), _oopmap(oopmap), _num_total_oops(0), _num_null_oops(0) {}

  virtual void do_oop(narrowOop* p) {
    assert(UseCompressedOops, "sanity");
    _num_total_oops ++;
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      // Bit index is the slot's distance from _start in narrowOop-sized units.
      size_t idx = p - (narrowOop*)_start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  virtual void do_oop(oop* p) {
    assert(!UseCompressedOops, "sanity");
    _num_total_oops ++;
    if ((*p) != nullptr) {
      // Bit index is the slot's distance from _start in oop-sized units.
      size_t idx = p - (oop*)_start;
      _oopmap->set_bit(idx);
    } else {
      _num_null_oops ++;
    }
  }
  int num_total_oops() const { return _num_total_oops; }
  int num_null_oops()  const { return _num_null_oops; }
};
#endif

// Accumulate allocation statistics for one archived object of the given size
// (in heap words); each histogram slot i collects objects of size <= 2^i.
void HeapShared::count_allocation(size_t size) {
  _total_obj_count ++;
  _total_obj_size += size;
  for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
    if (size <= (size_t(1) << i)) {
      _alloc_count[i] ++;
      _alloc_size[i] += size;
      return;
    }
  }
}

// Average object size in bytes (size is in heap words); 0 when count is 0.
static double avg_size(size_t size, size_t count) {
  double avg = 0;
  if (count > 0) {
    avg = double(size * HeapWordSize) / double(count);
  }
  return avg;
}

// Log the per-size-bucket allocation histogram collected by count_allocation(),
// followed by the "huge" remainder (objects larger than the last bucket) and
// the overall totals.
void HeapShared::print_stats() {
  // Start from the totals and subtract each bucket; what remains is "huge".
  size_t huge_count = _total_obj_count;
  size_t huge_size = _total_obj_size;

  for (int i = 0; i < ALLOC_STAT_SLOTS; i++) {
    size_t byte_size_limit = (size_t(1) << i) * HeapWordSize;
    size_t count = _alloc_count[i];
    size_t size = _alloc_size[i];
    log_info(aot, heap)("%8zu objects are <= %-6zu"
                        " bytes (total %8zu bytes, avg %8.1f bytes)",
                        count, byte_size_limit, size * HeapWordSize, avg_size(size, count));
    huge_count -= count;
    huge_size -= size;
  }

  log_info(aot, heap)("%8zu huge objects (total %8zu bytes"
                      ", avg %8.1f bytes)",
                      huge_count, huge_size * HeapWordSize,
                      avg_size(huge_size, huge_count));
  log_info(aot, heap)("%8zu total objects (total %8zu bytes"
                      ", avg %8.1f bytes)",
                      _total_obj_count, _total_obj_size * HeapWordSize,
                      avg_size(_total_obj_size, _total_obj_count));
}

// Returns true if jdk.internal.module.ArchivedBootLayer has already been
// loaded and its archived boot-layer field holds a non-null value.
bool HeapShared::is_archived_boot_layer_available(JavaThread* current) {
  TempNewSymbol klass_name = SymbolTable::new_symbol(ARCHIVED_BOOT_LAYER_CLASS);
  InstanceKlass* k = SystemDictionary::find_instance_klass(current, klass_name, Handle());
  if (k == nullptr) {
    return false;
  } else {
    TempNewSymbol field_name = SymbolTable::new_symbol(ARCHIVED_BOOT_LAYER_FIELD);
    TempNewSymbol field_signature = SymbolTable::new_symbol("Ljdk/internal/module/ArchivedBootLayer;");
    fieldDescriptor fd;
    if (k->find_field(field_name, field_signature, true, &fd) != nullptr) {
      oop m = k->java_mirror();
      oop f = m->obj_field(fd.offset());
      if (CompressedOops::is_null(f)) {
        return false;
      }
    } else {
      return false;
    }
  }
  return true;
}

#endif // INCLUDE_CDS_JAVA_HEAP