/*
 * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveHeapLoader.inline.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "sanitizers/ub.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"

#if INCLUDE_CDS_JAVA_HEAP

bool ArchiveHeapLoader::_is_mapped = false;
bool ArchiveHeapLoader::_is_loaded = false;

bool    ArchiveHeapLoader::_narrow_oop_base_initialized = false;
address ArchiveHeapLoader::_narrow_oop_base;
int     ArchiveHeapLoader::_narrow_oop_shift;

// Support for loaded heap.
uintptr_t ArchiveHeapLoader::_loaded_heap_bottom = 0;
uintptr_t ArchiveHeapLoader::_loaded_heap_top = 0;
uintptr_t ArchiveHeapLoader::_dumptime_base = UINTPTR_MAX;
uintptr_t ArchiveHeapLoader::_dumptime_top = 0;
intx ArchiveHeapLoader::_runtime_offset = 0;
bool ArchiveHeapLoader::_loading_failed = false;

// Support for mapped heap.
uintptr_t ArchiveHeapLoader::_mapped_heap_bottom = 0;
bool      ArchiveHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t ArchiveHeapLoader::_mapped_heap_delta = 0;
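// The archived heap can be attached to the runtime heap in one of two ways:
//
// - Mapping: the region is mmap'ed directly from the archive, and only the
//   embedded pointers are patched (see "Support for Region MAPPING" below).
// - Loading: the region is copied into a buffer allocated from the Java heap
//   and then relocated (see "Support for Region LOADING" below).
//
// In both cases relocation is, roughly, a single linear shift of every
// dump-time address:
//
//   runtime_address = dumptime_address + delta
//
// where delta is _mapped_heap_delta in the mapped case and _runtime_offset
// in the loaded case.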
// Every mapped region is offset by _mapped_heap_delta from its requested address.
// See FileMapInfo::heap_region_requested_address().
ATTRIBUTE_NO_UBSAN
void ArchiveHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
  assert(!_mapped_heap_relocation_initialized, "only once");
  if (!UseCompressedOops) {
    assert(dumptime_oop_shift == 0, "sanity");
  }
  assert(can_map(), "sanity");
  init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
  _mapped_heap_bottom = (intptr_t)mapped_heap_bottom;
  _mapped_heap_delta = delta;
  _mapped_heap_relocation_initialized = true;
}

void ArchiveHeapLoader::init_narrow_oop_decoding(address base, int shift) {
  assert(!_narrow_oop_base_initialized, "only once");
  _narrow_oop_base_initialized = true;
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

void ArchiveHeapLoader::fixup_region() {
  FileMapInfo* mapinfo = FileMapInfo::current_info();
  if (is_mapped()) {
    mapinfo->fixup_mapped_heap_region();
  } else if (_loading_failed) {
    fill_failed_loaded_heap();
  }
  if (is_in_use()) {
    if (!CDSConfig::is_using_full_module_graph()) {
      // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
      ClassLoaderDataShared::clear_archived_oops();
    }
  }
}

// ------------------ Support for Region MAPPING -----------------------------------------

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchCompressedEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = ArchiveHeapLoader::decode_from_mapped_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

class PatchCompressedEmbeddedPointersQuick: public BitMapClosure {
  narrowOop* _start;
  uint32_t _delta;

 public:
  PatchCompressedEmbeddedPointersQuick(narrowOop* start, uint32_t delta) : _start(start), _delta(delta) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    narrowOop new_v = CompressedOops::narrow_oop_cast(CompressedOops::narrow_oop_value(v) + _delta);
    assert(!CompressedOops::is_null(new_v), "should never relocate to narrowOop(0)");
#ifdef ASSERT
    oop o1 = ArchiveHeapLoader::decode_from_mapped_archive(v);
    oop o2 = CompressedOops::decode_not_null(new_v);
    assert(o1 == o2, "quick delta must work");
#endif
    RawAccess<IS_NOT_NULL>::oop_store(p, new_v);
    return true;
  }
};

class PatchUncompressedEmbeddedPointers: public BitMapClosure {
  oop* _start;
  intptr_t _delta;

 public:
  PatchUncompressedEmbeddedPointers(oop* start, intx runtime_offset) :
    _start(start),
    _delta(runtime_offset) {}

  PatchUncompressedEmbeddedPointers(oop* start) :
    _start(start),
    _delta(ArchiveHeapLoader::mapped_heap_delta()) {}

  bool do_bit(size_t offset) {
    oop* p = _start + offset;
    intptr_t dumptime_oop = (intptr_t)((void*)*p);
    assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
    intptr_t runtime_oop = dumptime_oop + _delta;
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
    return true;
  }
};
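// A minimal sketch of the decoding that the patchers above depend on,
// assuming the standard compressed-oops scheme; the real implementations are
// ArchiveHeapLoader::decode_from_mapped_archive() and
// CompressedOops::decode_not_null():
//
//   oop decode(narrowOop v, address base, int shift) {
//     return cast_to_oop((uintptr_t)base + ((uintptr_t)v << shift));
//   }
//
// When the dump-time shift equals the runtime shift, the dump-time and
// runtime encodings of every oop differ by the same 32-bit value, so patching
// degenerates to adding a constant delta to each narrowOop. That is the
// "quick" path implemented by PatchCompressedEmbeddedPointersQuick.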
void ArchiveHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
                                                           FileMapInfo* info,
                                                           MemRegion region) {
  narrowOop dt_encoded_bottom = info->encoded_heap_region_dumptime_address();
  narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start()));
  log_info(cds)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x",
                (uint)dt_encoded_bottom, (uint)rt_encoded_bottom);

  // Optimization: if dumptime shift is the same as runtime shift, we can perform a
  // quick conversion from "dumptime narrowOop" -> "runtime narrowOop".
  narrowOop* patching_start = (narrowOop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos();
  if (_narrow_oop_shift == CompressedOops::shift()) {
    uint32_t quick_delta = (uint32_t)rt_encoded_bottom - (uint32_t)dt_encoded_bottom;
    log_info(cds)("CDS heap data relocation quick delta = 0x%x", quick_delta);
    if (quick_delta == 0) {
      log_info(cds)("CDS heap data relocation unnecessary, quick_delta = 0");
    } else {
      PatchCompressedEmbeddedPointersQuick patcher(patching_start, quick_delta);
      bm.iterate(&patcher);
    }
  } else {
    log_info(cds)("CDS heap data quick relocation not possible");
    PatchCompressedEmbeddedPointers patcher(patching_start);
    bm.iterate(&patcher);
  }
}

// Patch all the non-null pointers that are embedded in the archived heap objects
// in this (mapped) region
void ArchiveHeapLoader::patch_embedded_pointers(FileMapInfo* info,
                                                MemRegion region, address oopmap,
                                                size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
  if (UseCompressedOops) {
    patch_compressed_embedded_pointers(bm, info, region);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos());
    bm.iterate(&patcher);
  }
}
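// How the oopmap drives the patchers above: each archived heap region carries
// a bitmap with one bit per potential pointer slot, where a set bit at index
// N means that the N-th slot after the patching start holds a non-null
// embedded oop. BitMap::iterate() then invokes do_bit(N) on the chosen
// closure for every set bit, as in the sketch below (names taken from the
// functions above):
//
//   BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
//   PatchCompressedEmbeddedPointers patcher(patching_start);
//   bm.iterate(&patcher); // do_bit(N) patches the oop at patching_start + N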
// ------------------ Support for Region LOADING -----------------------------------------

// The CDS archive remembers each heap object by its address at dump time, but
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses.
struct LoadedArchiveHeapRegion {
  int       _region_index;   // index for FileMapInfo::space_at(index)
  size_t    _region_size;    // number of bytes in this region
  uintptr_t _dumptime_base;  // The dump-time (decoded) address of the first object in this region
  intx      _runtime_offset; // If an object's dump time address P is within this region, its
                             // runtime address is P + _runtime_offset
  uintptr_t top() {
    return _dumptime_base + _region_size;
  }
};

void ArchiveHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) {
  _dumptime_base = loaded_region->_dumptime_base;
  _dumptime_top = loaded_region->top();
  _runtime_offset = loaded_region->_runtime_offset;
}

bool ArchiveHeapLoader::can_load() {
  return Universe::heap()->can_load_archived_objects();
}

class ArchiveHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
  narrowOop* _start;
  intx _offset;
  uintptr_t _base;
  uintptr_t _top;

 public:
  PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region)
    : _start(start),
      _offset(loaded_region->_runtime_offset),
      _base(loaded_region->_dumptime_base),
      _top(loaded_region->top()) {}

  bool do_bit(size_t offset) {
    assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    uintptr_t o = cast_from_oop<uintptr_t>(ArchiveHeapLoader::decode_from_archive(v));
    assert(_base <= o && o < _top, "must be");

    o += _offset;
    ArchiveHeapLoader::assert_in_loaded_heap(o);
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
    return true;
  }
};

bool ArchiveHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                           MemRegion& archive_space) {
  size_t total_bytes = 0;
  FileMapRegion* r = mapinfo->region_at(MetaspaceShared::hp);
  r->assert_is_heap_region();
  if (r->used() == 0) {
    return false;
  }

  assert(is_aligned(r->used(), HeapWordSize), "must be");
  total_bytes += r->used();
  loaded_region->_region_index = MetaspaceShared::hp;
  loaded_region->_region_size = r->used();
  loaded_region->_dumptime_base = (uintptr_t)mapinfo->heap_region_dumptime_address();

  assert(is_aligned(total_bytes, HeapWordSize), "must be");
  size_t word_size = total_bytes / HeapWordSize;
  HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
  if (buffer == nullptr) {
    return false;
  }

  archive_space = MemRegion(buffer, word_size);
  _loaded_heap_bottom = (uintptr_t)archive_space.start();
  _loaded_heap_top = _loaded_heap_bottom + total_bytes;

  loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base;

  return true;
}
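// A worked example of the offset computed above (illustrative addresses
// only): if the objects were archived relative to a _dumptime_base of
// 0x0000000700000000 and allocate_loaded_archive_space() returns a buffer at
// 0x00000006c0000000, then
//
//   _runtime_offset = 0x00000006c0000000 - 0x0000000700000000
//
// and an object whose dump-time address is P is now located at
// P + _runtime_offset, which is exactly the translation that
// PatchLoadedRegionPointers applies to every embedded pointer.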
bool ArchiveHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                              uintptr_t load_address) {
  uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
  if (bitmap_base == 0) {
    _loading_failed = true;
    return false; // OOM or CRC error
  }

  FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index);
  if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
    // There's no easy way to free the buffer, so we will fill it with zero later
    // in fill_failed_loaded_heap(), and it will eventually be GC'ed.
    log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", loaded_region->_region_index);
    _loading_failed = true;
    return false;
  }
  assert(r->mapped_base() == (char*)load_address, "sanity");
  log_info(cds)("Loaded heap region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
                " size " SIZE_FORMAT_W(6) " delta " INTX_FORMAT,
                loaded_region->_region_index, load_address, load_address + loaded_region->_region_size,
                loaded_region->_region_size, loaded_region->_runtime_offset);

  uintptr_t oopmap = bitmap_base + r->oopmap_offset();
  BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());

  if (UseCompressedOops) {
    PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region);
    bm.iterate(&patcher);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region->_runtime_offset);
    bm.iterate(&patcher);
  }
  return true;
}

bool ArchiveHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
  assert(can_load(), "loading archived heap objects must be supported");
  init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());

  LoadedArchiveHeapRegion loaded_region;
  memset(&loaded_region, 0, sizeof(loaded_region));

  MemRegion archive_space;
  if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) {
    return false;
  }

  if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) {
    assert(_loading_failed, "must be");
    return false;
  }

  init_loaded_heap_relocation(&loaded_region);
  _is_loaded = true;

  return true;
}

class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
  ResourceHashtable<uintptr_t, bool>* _table;

 public:
  VerifyLoadedHeapEmbeddedPointers(ResourceHashtable<uintptr_t, bool>* table) : _table(table) {}

  virtual void do_oop(narrowOop* p) {
    // This should be called before the loaded region is modified, so all the embedded pointers
    // must be null, or must point to a valid object in the loaded region.
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      oop o = CompressedOops::decode_not_null(v);
      uintptr_t u = cast_from_oop<uintptr_t>(o);
      ArchiveHeapLoader::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
    }
  }
  virtual void do_oop(oop* p) {
    oop v = *p;
    if (v != nullptr) {
      uintptr_t u = cast_from_oop<uintptr_t>(v);
      ArchiveHeapLoader::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
    }
  }
};
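// verify_loaded_heap() below uses the closure above in two passes: the first
// pass walks the loaded region object by object and records each object's
// start address in a ResourceHashtable; the second pass iterates every
// embedded pointer and guarantees that it refers to one of the recorded
// object starts, i.e. that no pointer lands in the middle of an object.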
void ArchiveHeapLoader::finish_initialization() {
  if (is_loaded()) {
    // These operations are needed only when the heap is loaded (not mapped).
    finish_loaded_heap();
    if (VerifyArchivedFields > 0) {
      verify_loaded_heap();
    }
  }
  if (is_in_use()) {
    patch_native_pointers();
    intptr_t bottom = is_loaded() ? _loaded_heap_bottom : _mapped_heap_bottom;

    // The heap roots are stored in one or more segments that are laid out consecutively.
    // The size of each segment (except for the last one) is max_size_in_{elems,bytes}.
    HeapRootSegments segments = FileMapInfo::current_info()->heap_root_segments();
    HeapShared::init_root_segment_sizes(segments.max_size_in_elems());
    intptr_t first_segment_addr = bottom + segments.base_offset();
    for (size_t c = 0; c < segments.count(); c++) {
      oop segment_oop = cast_to_oop(first_segment_addr + (c * segments.max_size_in_bytes()));
      assert(segment_oop->is_objArray(), "Must be");
      HeapShared::add_root_segment((objArrayOop)segment_oop);
    }
  }
}

void ArchiveHeapLoader::finish_loaded_heap() {
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top = (HeapWord*)_loaded_heap_top;

  MemRegion archive_space = MemRegion(bottom, top);
  Universe::heap()->complete_loaded_archive_space(archive_space);
}

void ArchiveHeapLoader::verify_loaded_heap() {
  log_info(cds, heap)("Verify all oops and pointers in loaded heap");

  ResourceMark rm;
  ResourceHashtable<uintptr_t, bool> table;
  VerifyLoadedHeapEmbeddedPointers verifier(&table);
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top = (HeapWord*)_loaded_heap_top;

  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    table.put(cast_from_oop<uintptr_t>(o), true);
    p += o->size();
  }

  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&verifier);
    p += o->size();
  }
}

void ArchiveHeapLoader::fill_failed_loaded_heap() {
  assert(_loading_failed, "must be");
  if (_loaded_heap_bottom != 0) {
    assert(_loaded_heap_top != 0, "must be");
    HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
    HeapWord* top = (HeapWord*)_loaded_heap_top;
    Universe::heap()->fill_with_objects(bottom, top - bottom);
  }
}

oop ArchiveHeapLoader::oop_from_offset(int offset) {
  // Once GC starts, the offsets saved in CachedCodeDirectoryInternal::_permanent_oop_offsets
  // will become invalid. There is no convenient way to assert that GC is still disallowed,
  // but GC cannot happen before the Object class is loaded, so we check for that instead.
  assert(CDSConfig::is_using_archive(), "sanity");
  assert(vmClasses::Object_klass()->class_loader_data() == nullptr,
         "can be called only very early during VM start-up");
  if (is_loaded()) {
    return cast_to_oop(_loaded_heap_bottom + offset);
  } else {
    assert(is_mapped(), "must be");
    return cast_to_oop(_mapped_heap_bottom + offset);
  }
}

class PatchNativePointers: public BitMapClosure {
  Metadata** _start;

 public:
  PatchNativePointers(Metadata** start) : _start(start) {}

  bool do_bit(size_t offset) {
    Metadata** p = _start + offset;
    *p = (Metadata*)(address(*p) + MetaspaceShared::relocation_delta());
    return true;
  }
};

void ArchiveHeapLoader::patch_native_pointers() {
  if (MetaspaceShared::relocation_delta() == 0) {
    return;
  }

  FileMapRegion* r = FileMapInfo::current_info()->region_at(MetaspaceShared::hp);
  if (r->mapped_base() != nullptr && r->has_ptrmap()) {
    log_info(cds, heap)("Patching native pointers in heap region");
    BitMapView bm = FileMapInfo::current_info()->ptrmap_view(MetaspaceShared::hp);
    PatchNativePointers patcher((Metadata**)r->mapped_base() + FileMapInfo::current_info()->heap_ptrmap_start_pos());
    bm.iterate(&patcher);
  }
}
#endif // INCLUDE_CDS_JAVA_HEAP