/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotMetaspace.hpp"
#include "cds/archiveHeapLoader.inline.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "sanitizers/ub.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"

#if INCLUDE_CDS_JAVA_HEAP

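// An archived heap region can be put into use in one of two ways:
// - Mapped: the region is memory-mapped directly from the archive file
//   (see can_map() and the "Region MAPPING" section below).
// - Loaded: the region is read into a buffer allocated from the runtime
//   heap (see can_load() and the "Region LOADING" section below).
// At most one of _is_mapped/_is_loaded is set; the remaining statics hold
// the relocation parameters for whichever mode is in effect.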
bool ArchiveHeapLoader::_is_mapped = false;
bool ArchiveHeapLoader::_is_loaded = false;

bool ArchiveHeapLoader::_narrow_oop_base_initialized = false;
address ArchiveHeapLoader::_narrow_oop_base;
int ArchiveHeapLoader::_narrow_oop_shift;

// Support for loaded heap.
uintptr_t ArchiveHeapLoader::_loaded_heap_bottom = 0;
uintptr_t ArchiveHeapLoader::_loaded_heap_top = 0;
uintptr_t ArchiveHeapLoader::_dumptime_base = UINTPTR_MAX;
uintptr_t ArchiveHeapLoader::_dumptime_top = 0;
intx ArchiveHeapLoader::_runtime_offset = 0;
bool ArchiveHeapLoader::_loading_failed = false;

// Support for mapped heap.
uintptr_t ArchiveHeapLoader::_mapped_heap_bottom = 0;
bool ArchiveHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t ArchiveHeapLoader::_mapped_heap_delta = 0;

// Every mapped region is offset by _mapped_heap_delta from its requested address.
// See FileMapInfo::heap_region_requested_address().
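// The region's requested address is chosen so that a dump-time narrowOop
// decodes to the correct runtime address when the region is mapped there.
// Since the actual mapping sits delta bytes away, init_mapped_heap_info()
// shifts the decoding base by the same delta, so that
// decode_from_mapped_archive() (using that base and the dump-time shift)
// yields addresses inside the mapped region.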
ATTRIBUTE_NO_UBSAN
void ArchiveHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
  assert(!_mapped_heap_relocation_initialized, "only once");
  if (!UseCompressedOops) {
    assert(dumptime_oop_shift == 0, "sanity");
  }
  assert(can_map(), "sanity");
  init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
  _mapped_heap_bottom = (intptr_t)mapped_heap_bottom;
  _mapped_heap_delta = delta;
  _mapped_heap_relocation_initialized = true;
}

void ArchiveHeapLoader::init_narrow_oop_decoding(address base, int shift) {
  assert(!_narrow_oop_base_initialized, "only once");
  _narrow_oop_base_initialized = true;
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

void ArchiveHeapLoader::fixup_region() {
  FileMapInfo* mapinfo = FileMapInfo::current_info();
  if (is_mapped()) {
    mapinfo->fixup_mapped_heap_region();
  } else if (_loading_failed) {
    fill_failed_loaded_heap();
  }
  if (is_in_use()) {
    if (!CDSConfig::is_using_full_module_graph()) {
      // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
      ClassLoaderDataShared::clear_archived_oops();
    }
  }
}

// ------------------ Support for Region MAPPING -----------------------------------------

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchCompressedEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = ArchiveHeapLoader::decode_from_mapped_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

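// A faster variant of the above: when the dump-time and runtime narrow-oop
// shifts agree, "decode, relocate, re-encode" collapses into adding a single
// 32-bit delta to each narrowOop value, with no decode/encode round trip.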
class PatchCompressedEmbeddedPointersQuick: public BitMapClosure {
  narrowOop* _start;
  uint32_t _delta;

 public:
  PatchCompressedEmbeddedPointersQuick(narrowOop* start, uint32_t delta) : _start(start), _delta(delta) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    narrowOop new_v = CompressedOops::narrow_oop_cast(CompressedOops::narrow_oop_value(v) + _delta);
    assert(!CompressedOops::is_null(new_v), "should never relocate to narrowOop(0)");
#ifdef ASSERT
    oop o1 = ArchiveHeapLoader::decode_from_mapped_archive(v);
    oop o2 = CompressedOops::decode_not_null(new_v);
    assert(o1 == o2, "quick delta must work");
#endif
    RawAccess<IS_NOT_NULL>::oop_store(p, new_v);
    return true;
  }
};

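// With uncompressed oops, each embedded pointer is a full dump-time address;
// relocation is a plain addition of the byte distance between the region's
// dump-time and runtime positions (the mapped delta, or the loaded region's
// runtime offset).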
class PatchUncompressedEmbeddedPointers: public BitMapClosure {
  oop* _start;
  intptr_t _delta;

 public:
  PatchUncompressedEmbeddedPointers(oop* start, intx runtime_offset) :
    _start(start),
    _delta(runtime_offset) {}

  PatchUncompressedEmbeddedPointers(oop* start) :
    _start(start),
    _delta(ArchiveHeapLoader::mapped_heap_delta()) {}

  bool do_bit(size_t offset) {
    oop* p = _start + offset;
    intptr_t dumptime_oop = (intptr_t)((void*)*p);
    assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
    intptr_t runtime_oop = dumptime_oop + _delta;
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
    return true;
  }
};

void ArchiveHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
                                                           FileMapInfo* info,
                                                           MemRegion region) {
  narrowOop dt_encoded_bottom = info->encoded_heap_region_dumptime_address();
  narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start()));
  log_info(aot)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x",
                (uint)dt_encoded_bottom, (uint)rt_encoded_bottom);

  // Optimization: if the dump-time shift is the same as the runtime shift, we can
  // perform a quick conversion from "dumptime narrowOop" -> "runtime narrowOop".
  narrowOop* patching_start = (narrowOop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos();
  if (_narrow_oop_shift == CompressedOops::shift()) {
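    // This subtraction wraps in 32-bit modular arithmetic, which is fine:
    // do_bit() adds the delta back with the same wrap-around, so the result
    // is exact even when rt_encoded_bottom is below dt_encoded_bottom.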
    uint32_t quick_delta = (uint32_t)rt_encoded_bottom - (uint32_t)dt_encoded_bottom;
    log_info(aot)("heap data relocation quick delta = 0x%x", quick_delta);
    if (quick_delta == 0) {
      log_info(aot)("heap data relocation unnecessary, quick_delta = 0");
    } else {
      PatchCompressedEmbeddedPointersQuick patcher(patching_start, quick_delta);
      bm.iterate(&patcher);
    }
  } else {
    log_info(aot)("heap data quick relocation not possible");
    PatchCompressedEmbeddedPointers patcher(patching_start);
    bm.iterate(&patcher);
  }
}

// Patch all the non-null pointers that are embedded in the archived heap objects
// in this (mapped) region.
void ArchiveHeapLoader::patch_embedded_pointers(FileMapInfo* info,
                                                MemRegion region, address oopmap,
                                                size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
  if (UseCompressedOops) {
    patch_compressed_embedded_pointers(bm, info, region);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos());
    bm.iterate(&patcher);
  }
}

// ------------------ Support for Region LOADING -----------------------------------------

// The CDS archive remembers each heap object by its address at dump time, but
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump-time addresses of all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses.
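// For example (hypothetical addresses): if _dumptime_base is 0x0000000710000000
// and the region ends up loaded at 0x00000006c0000000, then _runtime_offset is
// -0x50000000, and every address P inside the region maps to P + _runtime_offset.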
struct LoadedArchiveHeapRegion {
  int       _region_index;   // index for FileMapInfo::space_at(index)
  size_t    _region_size;    // number of bytes in this region
  uintptr_t _dumptime_base;  // The dump-time (decoded) address of the first object in this region
  intx      _runtime_offset; // If an object's dump-time address P is within this region, its
                             // runtime address is P + _runtime_offset
  uintptr_t top() {
    return _dumptime_base + _region_size;
  }
};

void ArchiveHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) {
  _dumptime_base = loaded_region->_dumptime_base;
  _dumptime_top = loaded_region->top();
  _runtime_offset = loaded_region->_runtime_offset;
}

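// Loading is possible only if the GC can hand us a contiguous block of heap
// memory to copy the region into; whether that is supported is up to the
// CollectedHeap implementation (see allocate_loaded_archive_space() below).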
bool ArchiveHeapLoader::can_load() {
  return Universe::heap()->can_load_archived_objects();
}

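// Relocates embedded narrowOops in a loaded region: each value is decoded with
// the dump-time encoding, shifted by _runtime_offset into the loaded region,
// and stored back through oop_store(), which re-encodes it with the runtime
// narrow-oop encoding.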
class ArchiveHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
  narrowOop* _start;
  intx _offset;
  uintptr_t _base;
  uintptr_t _top;

 public:
  PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region)
    : _start(start),
      _offset(loaded_region->_runtime_offset),
      _base(loaded_region->_dumptime_base),
      _top(loaded_region->top()) {}

  bool do_bit(size_t offset) {
    assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    uintptr_t o = cast_from_oop<uintptr_t>(ArchiveHeapLoader::decode_from_archive(v));
    assert(_base <= o && o < _top, "must be");

    o += _offset;
    ArchiveHeapLoader::assert_in_loaded_heap(o);
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
    return true;
  }
};

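// Computes the size of the archived heap region, allocates a buffer for it
// inside the runtime heap, and records the dumptime->runtime translation in
// *loaded_region. Returns false if the region is empty or the allocation fails.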
bool ArchiveHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                           MemRegion& archive_space) {
  size_t total_bytes = 0;
  FileMapRegion* r = mapinfo->region_at(AOTMetaspace::hp);
  r->assert_is_heap_region();
  if (r->used() == 0) {
    return false;
  }

  assert(is_aligned(r->used(), HeapWordSize), "must be");
  total_bytes += r->used();
  loaded_region->_region_index = AOTMetaspace::hp;
  loaded_region->_region_size = r->used();
  loaded_region->_dumptime_base = (uintptr_t)mapinfo->heap_region_dumptime_address();

  assert(is_aligned(total_bytes, HeapWordSize), "must be");
  size_t word_size = total_bytes / HeapWordSize;
  HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
  if (buffer == nullptr) {
    return false;
  }

  archive_space = MemRegion(buffer, word_size);
  _loaded_heap_bottom = (uintptr_t)archive_space.start();
  _loaded_heap_top = _loaded_heap_bottom + total_bytes;

  loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base;

  return true;
}

bool ArchiveHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                              uintptr_t load_address) {
  uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
  if (bitmap_base == 0) {
    _loading_failed = true;
    return false; // OOM or CRC error
  }

  FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index);
  if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
    // There's no easy way to free the buffer, so we will fill it with filler objects
    // later in fill_failed_loaded_heap(), and it will eventually be GC'ed.
    log_warning(aot)("Loading of heap region %d has failed. Archived objects are disabled", loaded_region->_region_index);
    _loading_failed = true;
    return false;
  }
  assert(r->mapped_base() == (char*)load_address, "sanity");
  log_info(aot)("Loaded heap region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
                " size %6zu delta %zd",
                loaded_region->_region_index, load_address, load_address + loaded_region->_region_size,
                loaded_region->_region_size, loaded_region->_runtime_offset);

  uintptr_t oopmap = bitmap_base + r->oopmap_offset();
  BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());

  if (UseCompressedOops) {
    PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region);
    bm.iterate(&patcher);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region->_runtime_offset);
    bm.iterate(&patcher);
  }
  return true;
}

bool ArchiveHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
  assert(can_load(), "loading the archived heap must be supported");
  init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());

  LoadedArchiveHeapRegion loaded_region;
  memset(&loaded_region, 0, sizeof(loaded_region));

  MemRegion archive_space;
  if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) {
    return false;
  }

  if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) {
    assert(_loading_failed, "must be");
    return false;
  }

  init_loaded_heap_relocation(&loaded_region);
  _is_loaded = true;

  return true;
}

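// Sanity check, used when VerifyArchivedFields > 0: immediately after loading
// (and before the region is otherwise modified), every non-null pointer
// embedded in the loaded region must point at the start of an object inside
// that same region.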
class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
  HashTable<uintptr_t, bool>* _table;

 public:
  VerifyLoadedHeapEmbeddedPointers(HashTable<uintptr_t, bool>* table) : _table(table) {}

  virtual void do_oop(narrowOop* p) {
    // This should be called before the loaded region is modified, so all the embedded pointers
    // must be null, or must point to a valid object in the loaded region.
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      oop o = CompressedOops::decode_not_null(v);
      uintptr_t u = cast_from_oop<uintptr_t>(o);
      ArchiveHeapLoader::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
    }
  }
  virtual void do_oop(oop* p) {
    oop v = *p;
    if (v != nullptr) {
      uintptr_t u = cast_from_oop<uintptr_t>(v);
      ArchiveHeapLoader::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
    }
  }
};

void ArchiveHeapLoader::finish_initialization() {
  if (is_loaded()) {
    // These operations are needed only when the heap is loaded (not mapped).
    finish_loaded_heap();
    if (VerifyArchivedFields > 0) {
      verify_loaded_heap();
    }
  }
  if (is_in_use()) {
    patch_native_pointers();
    intptr_t bottom = is_loaded() ? _loaded_heap_bottom : _mapped_heap_bottom;

    // The heap roots are stored in one or more segments that are laid out consecutively.
    // The size of each segment (except for the last one) is max_size_in_{elems,bytes}.
    HeapRootSegments segments = FileMapInfo::current_info()->heap_root_segments();
    HeapShared::init_root_segment_sizes(segments.max_size_in_elems());
    intptr_t first_segment_addr = bottom + segments.base_offset();
    for (size_t c = 0; c < segments.count(); c++) {
      oop segment_oop = cast_to_oop(first_segment_addr + (c * segments.max_size_in_bytes()));
      assert(segment_oop->is_objArray(), "Must be");
      HeapShared::add_root_segment((objArrayOop)segment_oop);
    }
  }
}

void ArchiveHeapLoader::finish_loaded_heap() {
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top = (HeapWord*)_loaded_heap_top;

  MemRegion archive_space = MemRegion(bottom, top);
  Universe::heap()->complete_loaded_archive_space(archive_space);
}

void ArchiveHeapLoader::verify_loaded_heap() {
  log_info(aot, heap)("Verify all oops and pointers in loaded heap");

  ResourceMark rm;
  HashTable<uintptr_t, bool> table;
  VerifyLoadedHeapEmbeddedPointers verifier(&table);
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top = (HeapWord*)_loaded_heap_top;

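  // Pass 1: record the start address of every object in the loaded region.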
  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    table.put(cast_from_oop<uintptr_t>(o), true);
    p += o->size();
  }

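  // Pass 2: check that every embedded pointer targets one of the recorded starts.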
  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&verifier);
    p += o->size();
  }
}

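// The buffer returned by allocate_loaded_archive_space() cannot simply be
// freed, so on a failed load we overwrite it with filler objects. This keeps
// the heap parseable, and the dead range is reclaimed by a later GC.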
void ArchiveHeapLoader::fill_failed_loaded_heap() {
  assert(_loading_failed, "must be");
  if (_loaded_heap_bottom != 0) {
    assert(_loaded_heap_top != 0, "must be");
    HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
    HeapWord* top = (HeapWord*)_loaded_heap_top;
    Universe::heap()->fill_with_objects(bottom, top - bottom);
  }
}

oop ArchiveHeapLoader::oop_from_offset(int offset) {
  // Once GC starts, the offsets saved in CachedCodeDirectoryInternal::_permanent_oop_offsets
  // will become invalid, because GC may move the objects they refer to. There is no direct
  // way to assert that GC cannot happen here, but GC cannot happen before the Object class
  // is loaded, so we check for that instead.
  assert(CDSConfig::is_using_archive(), "sanity");
  assert(vmClasses::Object_klass()->class_loader_data() == nullptr,
         "can be called only very early during VM start-up");
  if (is_loaded()) {
    return cast_to_oop(_loaded_heap_bottom + offset);
  } else {
    assert(is_mapped(), "must be");
    return cast_to_oop(_mapped_heap_bottom + offset);
  }
}

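// Archived heap objects can also embed native pointers into the AOT metaspace
// (e.g., a java.lang.Class mirror referring to its Klass). If the metaspace
// was mapped at a different address than requested, each such pointer must be
// shifted by the same relocation delta.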
class PatchNativePointers: public BitMapClosure {
  Metadata** _start;

 public:
  PatchNativePointers(Metadata** start) : _start(start) {}

  bool do_bit(size_t offset) {
    Metadata** p = _start + offset;
    *p = (Metadata*)(address(*p) + AOTMetaspace::relocation_delta());
    return true;
  }
};

void ArchiveHeapLoader::patch_native_pointers() {
  if (AOTMetaspace::relocation_delta() == 0) {
    return;
  }

  FileMapRegion* r = FileMapInfo::current_info()->region_at(AOTMetaspace::hp);
  if (r->mapped_base() != nullptr && r->has_ptrmap()) {
    log_info(aot, heap)("Patching native pointers in heap region");
    BitMapView bm = FileMapInfo::current_info()->ptrmap_view(AOTMetaspace::hp);
    PatchNativePointers patcher((Metadata**)r->mapped_base() + FileMapInfo::current_info()->heap_ptrmap_start_pos());
    bm.iterate(&patcher);
  }
}
#endif // INCLUDE_CDS_JAVA_HEAP