/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotMetaspace.hpp"
#include "cds/archiveHeapLoader.inline.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "sanitizers/ub.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"

#if INCLUDE_CDS_JAVA_HEAP

bool ArchiveHeapLoader::_is_mapped = false;
bool ArchiveHeapLoader::_is_loaded = false;
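
// An archived heap region can be used in one of two ways at runtime:
// - "mapped": the region is mmap'ed directly from the archive file
//   (see "Support for Region MAPPING" below);
// - "loaded": the region is copied into a buffer allocated from the
//   runtime Java heap (see "Support for Region LOADING" below).
// In both cases, the oop and native pointers embedded in the archived
// objects must be patched to match the runtime memory layout.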

bool ArchiveHeapLoader::_narrow_oop_base_initialized = false;
address ArchiveHeapLoader::_narrow_oop_base;
int ArchiveHeapLoader::_narrow_oop_shift;

// Support for loaded heap.
uintptr_t ArchiveHeapLoader::_loaded_heap_bottom = 0;
uintptr_t ArchiveHeapLoader::_loaded_heap_top = 0;
uintptr_t ArchiveHeapLoader::_dumptime_base = UINTPTR_MAX;
uintptr_t ArchiveHeapLoader::_dumptime_top = 0;
intx ArchiveHeapLoader::_runtime_offset = 0;
bool ArchiveHeapLoader::_loading_failed = false;

// Support for mapped heap.
uintptr_t ArchiveHeapLoader::_mapped_heap_bottom = 0;
bool ArchiveHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t ArchiveHeapLoader::_mapped_heap_delta = 0;

// Every mapped region is offset by _mapped_heap_delta from its requested address.
// See FileMapInfo::heap_region_requested_address().
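// For example (hypothetical addresses): if the region was archived assuming a
// requested address of 0x710000000 but is actually mapped at 0x710040000, then
// delta == 0x40000, and every dump-time pointer D into the region must be
// rewritten at runtime as D + 0x40000.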
ATTRIBUTE_NO_UBSAN
void ArchiveHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
  assert(!_mapped_heap_relocation_initialized, "only once");
  if (!UseCompressedOops) {
    assert(dumptime_oop_shift == 0, "sanity");
  }
  assert(can_map(), "sanity");
  init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
  _mapped_heap_bottom = (intptr_t)mapped_heap_bottom;
  _mapped_heap_delta = delta;
  _mapped_heap_relocation_initialized = true;
}

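// Record the (base, shift) pair with which narrowOops were encoded at dump
// time, so that a dump-time narrowOop V can later be decoded to a raw address
// as _narrow_oop_base + ((uintptr_t)V << _narrow_oop_shift) -- the standard
// CompressedOops decoding, but using the dump-time parameters.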
void ArchiveHeapLoader::init_narrow_oop_decoding(address base, int shift) {
  assert(!_narrow_oop_base_initialized, "only once");
  _narrow_oop_base_initialized = true;
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

void ArchiveHeapLoader::fixup_region() {
  FileMapInfo* mapinfo = FileMapInfo::current_info();
  if (is_mapped()) {
    mapinfo->fixup_mapped_heap_region();
  } else if (_loading_failed) {
    fill_failed_loaded_heap();
  }
  if (is_in_use()) {
    if (!CDSConfig::is_using_full_module_graph()) {
      // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
      ClassLoaderDataShared::clear_archived_oops();
    }
  }
}

// ------------------ Support for Region MAPPING -----------------------------------------

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchCompressedEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

public:
  PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = ArchiveHeapLoader::decode_from_mapped_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

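// A faster variant of the above, usable when the dump-time shift equals the
// runtime shift. With encode(P) = (P - base) >> shift and a fixed shift, the
// difference between the runtime and dump-time encodings of any object is the
// same 32-bit constant, so each narrowOop can be patched by adding a single
// precomputed delta, with no decode/encode round trip.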
class PatchCompressedEmbeddedPointersQuick: public BitMapClosure {
  narrowOop* _start;
  uint32_t _delta;

public:
  PatchCompressedEmbeddedPointersQuick(narrowOop* start, uint32_t delta) : _start(start), _delta(delta) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    narrowOop new_v = CompressedOops::narrow_oop_cast(CompressedOops::narrow_oop_value(v) + _delta);
    assert(!CompressedOops::is_null(new_v), "should never relocate to narrowOop(0)");
#ifdef ASSERT
    oop o1 = ArchiveHeapLoader::decode_from_mapped_archive(v);
    oop o2 = CompressedOops::decode_not_null(new_v);
    assert(o1 == o2, "quick delta must work");
#endif
    RawAccess<IS_NOT_NULL>::oop_store(p, new_v);
    return true;
  }
};

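// With -XX:-UseCompressedOops, embedded pointers are full-width oops, and
// relocation is a plain addition: runtime_oop = dumptime_oop + _delta. The
// delta is the mapping delta for a mapped region, or the runtime offset for
// a loaded region -- see the two constructors below.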
class PatchUncompressedEmbeddedPointers: public BitMapClosure {
  oop* _start;
  intptr_t _delta;

public:
  PatchUncompressedEmbeddedPointers(oop* start, intx runtime_offset) :
    _start(start),
    _delta(runtime_offset) {}

  PatchUncompressedEmbeddedPointers(oop* start) :
    _start(start),
    _delta(ArchiveHeapLoader::mapped_heap_delta()) {}

  bool do_bit(size_t offset) {
    oop* p = _start + offset;
    intptr_t dumptime_oop = (intptr_t)((void*)*p);
    assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
    intptr_t runtime_oop = dumptime_oop + _delta;
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
    return true;
  }
};

void ArchiveHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
                                                           FileMapInfo* info,
                                                           MemRegion region) {
  narrowOop dt_encoded_bottom = info->encoded_heap_region_dumptime_address();
  narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start()));
  log_info(aot)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x",
                (uint)dt_encoded_bottom, (uint)rt_encoded_bottom);

  // Optimization: if dumptime shift is the same as runtime shift, we can perform a
  // quick conversion from "dumptime narrowOop" -> "runtime narrowOop".
  narrowOop* patching_start = (narrowOop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos();
  if (_narrow_oop_shift == CompressedOops::shift()) {
    uint32_t quick_delta = (uint32_t)rt_encoded_bottom - (uint32_t)dt_encoded_bottom;
    log_info(aot)("heap data relocation quick delta = 0x%x", quick_delta);
    if (quick_delta == 0) {
      log_info(aot)("heap data relocation unnecessary, quick_delta = 0");
    } else {
      PatchCompressedEmbeddedPointersQuick patcher(patching_start, quick_delta);
      bm.iterate(&patcher);
    }
  } else {
    log_info(aot)("heap data quick relocation not possible");
    PatchCompressedEmbeddedPointers patcher(patching_start);
    bm.iterate(&patcher);
  }
}

// Patch all the non-null pointers that are embedded in the archived heap objects
// in this (mapped) region.
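// A set bit at position k in the oopmap marks a non-null pointer slot k slots
// (narrowOop- or oop-sized, depending on UseCompressedOops) past
// region.start() + heap_oopmap_start_pos().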
void ArchiveHeapLoader::patch_embedded_pointers(FileMapInfo* info,
                                                MemRegion region, address oopmap,
                                                size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
  if (UseCompressedOops) {
    patch_compressed_embedded_pointers(bm, info, region);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos());
    bm.iterate(&patcher);
  }
}

// ------------------ Support for Region LOADING -----------------------------------------

// The CDS archive remembers each heap object by its address at dump time, but
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses.
struct LoadedArchiveHeapRegion {
  int _region_index;         // index for FileMapInfo::space_at(index)
  size_t _region_size;       // number of bytes in this region
  uintptr_t _dumptime_base;  // The dump-time (decoded) address of the first object in this region
  intx _runtime_offset;      // If an object's dump-time address P is within this region, its
                             // runtime address is P + _runtime_offset
  uintptr_t top() {
    return _dumptime_base + _region_size;
  }
};
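
// For example (hypothetical values): if _dumptime_base == 0x710000000 and the
// region's objects are loaded starting at 0x720000000, then _runtime_offset ==
// 0x10000000, and a dump-time address P in [_dumptime_base, top()) translates
// to P + 0x10000000.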

void ArchiveHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) {
  _dumptime_base = loaded_region->_dumptime_base;
  _dumptime_top = loaded_region->top();
  _runtime_offset = loaded_region->_runtime_offset;
}

bool ArchiveHeapLoader::can_load() {
  return Universe::heap()->can_load_archived_objects();
}

class ArchiveHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
  narrowOop* _start;
  intx _offset;
  uintptr_t _base;
  uintptr_t _top;

public:
  PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region)
    : _start(start),
      _offset(loaded_region->_runtime_offset),
      _base(loaded_region->_dumptime_base),
      _top(loaded_region->top()) {}

  bool do_bit(size_t offset) {
    assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    uintptr_t o = cast_from_oop<uintptr_t>(ArchiveHeapLoader::decode_from_archive(v));
    assert(_base <= o && o < _top, "must be");

    o += _offset;
    ArchiveHeapLoader::assert_in_loaded_heap(o);
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
    return true;
  }
};

bool ArchiveHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                           MemRegion& archive_space) {
  size_t total_bytes = 0;
  FileMapRegion* r = mapinfo->region_at(AOTMetaspace::hp);
  r->assert_is_heap_region();
  if (r->used() == 0) {
    return false;
  }

  assert(is_aligned(r->used(), HeapWordSize), "must be");
  total_bytes += r->used();
  loaded_region->_region_index = AOTMetaspace::hp;
  loaded_region->_region_size = r->used();
  loaded_region->_dumptime_base = (uintptr_t)mapinfo->heap_region_dumptime_address();

  assert(is_aligned(total_bytes, HeapWordSize), "must be");
  size_t word_size = total_bytes / HeapWordSize;
  HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
  if (buffer == nullptr) {
    return false;
  }

  archive_space = MemRegion(buffer, word_size);
  _loaded_heap_bottom = (uintptr_t)archive_space.start();
  _loaded_heap_top = _loaded_heap_bottom + total_bytes;

  loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base;

  return true;
}

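// Read the heap region from the archive file into the buffer at load_address
// and relocate its embedded pointers:
// 1. map the bitmap region, which contains the oopmap of the heap region;
// 2. read the heap region's bytes into the pre-allocated buffer;
// 3. iterate the oopmap and shift every non-null embedded pointer by
//    loaded_region->_runtime_offset.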
bool ArchiveHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                              uintptr_t load_address) {
  uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
  if (bitmap_base == 0) {
    _loading_failed = true;
    return false; // OOM or CRC error
  }

  FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index);
  if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
    // There's no easy way to free the buffer, so we will fill it with zero later
    // in fill_failed_loaded_heap(), and it will eventually be GC'ed.
    log_warning(aot)("Loading of heap region %d has failed. Archived objects are disabled", loaded_region->_region_index);
    _loading_failed = true;
    return false;
  }
  assert(r->mapped_base() == (char*)load_address, "sanity");
  log_info(aot)("Loaded heap region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
                " size %6zu delta %zd",
                loaded_region->_region_index, load_address, load_address + loaded_region->_region_size,
                loaded_region->_region_size, loaded_region->_runtime_offset);

  uintptr_t oopmap = bitmap_base + r->oopmap_offset();
  BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());

  if (UseCompressedOops) {
    PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region);
    bm.iterate(&patcher);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region->_runtime_offset);
    bm.iterate(&patcher);
  }
  return true;
}

bool ArchiveHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
  assert(can_load(), "loading archived heap objects must be supported");
  init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());

  LoadedArchiveHeapRegion loaded_region;
  memset(&loaded_region, 0, sizeof(loaded_region));

  MemRegion archive_space;
  if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) {
    return false;
  }

  if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) {
    assert(_loading_failed, "must be");
    return false;
  }

  init_loaded_heap_relocation(&loaded_region);
  _is_loaded = true;

  return true;
}

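// Check that, before any of the loaded objects have been modified, every
// embedded pointer is either null or points to the start of another object
// within the loaded region. The table is filled with the address of every
// object in the region by verify_loaded_heap() below.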
class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
  HashTable<uintptr_t, bool>* _table;

public:
  VerifyLoadedHeapEmbeddedPointers(HashTable<uintptr_t, bool>* table) : _table(table) {}

  virtual void do_oop(narrowOop* p) {
    // This should be called before the loaded region is modified, so all the embedded pointers
    // must be null, or must point to a valid object in the loaded region.
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      oop o = CompressedOops::decode_not_null(v);
      uintptr_t u = cast_from_oop<uintptr_t>(o);
      ArchiveHeapLoader::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
    }
  }
  virtual void do_oop(oop* p) {
    oop v = *p;
    if (v != nullptr) {
      uintptr_t u = cast_from_oop<uintptr_t>(v);
      ArchiveHeapLoader::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
    }
  }
};

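// Called after the heap region has been mapped or loaded and it is safe to
// dereference the archived objects: completes the loaded region (if any) and
// publishes the archived root segments to HeapShared, making the archived
// objects reachable from Java code.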
void ArchiveHeapLoader::finish_initialization() {
  if (is_loaded()) {
    // These operations are needed only when the heap is loaded (not mapped).
    finish_loaded_heap();
    if (VerifyArchivedFields > 0) {
      verify_loaded_heap();
    }
  }
  if (is_in_use()) {
    patch_native_pointers();
    intptr_t bottom = is_loaded() ? _loaded_heap_bottom : _mapped_heap_bottom;

    // The heap roots are stored in one or more segments that are laid out consecutively.
    // The size of each segment (except for the last one) is max_size_in_{elems,bytes}.
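    // For example (hypothetical sizes): with base_offset() == 0x10 and
    // max_size_in_bytes() == 64K, segment c starts at bottom + 0x10 + c * 64K,
    // which is the address computed in the loop below.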
    HeapRootSegments segments = FileMapInfo::current_info()->heap_root_segments();
    HeapShared::init_root_segment_sizes(segments.max_size_in_elems());
    intptr_t first_segment_addr = bottom + segments.base_offset();
    for (size_t c = 0; c < segments.count(); c++) {
      oop segment_oop = cast_to_oop(first_segment_addr + (c * segments.max_size_in_bytes()));
      assert(segment_oop->is_objArray(), "Must be");
      HeapShared::add_root_segment((objArrayOop)segment_oop);
    }
  }
}

void ArchiveHeapLoader::finish_loaded_heap() {
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top = (HeapWord*)_loaded_heap_top;

  MemRegion archive_space = MemRegion(bottom, top);
  Universe::heap()->complete_loaded_archive_space(archive_space);
}

void ArchiveHeapLoader::verify_loaded_heap() {
  log_info(aot, heap)("Verify all oops and pointers in loaded heap");

  ResourceMark rm;
  HashTable<uintptr_t, bool> table;
  VerifyLoadedHeapEmbeddedPointers verifier(&table);
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top = (HeapWord*)_loaded_heap_top;

  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    table.put(cast_from_oop<uintptr_t>(o), true);
    p += o->size();
  }

  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&verifier);
    p += o->size();
  }
}

void ArchiveHeapLoader::fill_failed_loaded_heap() {
  assert(_loading_failed, "must be");
  if (_loaded_heap_bottom != 0) {
    assert(_loaded_heap_top != 0, "must be");
    HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
    HeapWord* top = (HeapWord*)_loaded_heap_top;
    Universe::heap()->fill_with_objects(bottom, top - bottom);
  }
}

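// Archived heap objects may also contain native pointers to Metadata (e.g. a
// java.lang.Class mirror points to its Klass). If the archived metaspace was
// not mapped at its requested address, each such pointer must be shifted by
// AOTMetaspace::relocation_delta(); the ptrmap bitmap marks their locations.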
class PatchNativePointers: public BitMapClosure {
  Metadata** _start;

public:
  PatchNativePointers(Metadata** start) : _start(start) {}

  bool do_bit(size_t offset) {
    Metadata** p = _start + offset;
    *p = (Metadata*)(address(*p) + AOTMetaspace::relocation_delta());
    return true;
  }
};

void ArchiveHeapLoader::patch_native_pointers() {
  if (AOTMetaspace::relocation_delta() == 0) {
    return;
  }

  FileMapRegion* r = FileMapInfo::current_info()->region_at(AOTMetaspace::hp);
  if (r->mapped_base() != nullptr && r->has_ptrmap()) {
    log_info(aot, heap)("Patching native pointers in heap region");
    BitMapView bm = FileMapInfo::current_info()->ptrmap_view(AOTMetaspace::hp);
    PatchNativePointers patcher((Metadata**)r->mapped_base() + FileMapInfo::current_info()->heap_ptrmap_start_pos());
    bm.iterate(&patcher);
  }
}
#endif // INCLUDE_CDS_JAVA_HEAP