/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotLogging.hpp"
#include "cds/aotMappedHeapLoader.inline.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "logging/logTag.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "sanitizers/ub.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool AOTMappedHeapLoader::_is_mapped = false;
bool AOTMappedHeapLoader::_is_loaded = false;

bool AOTMappedHeapLoader::_narrow_oop_base_initialized = false;
address AOTMappedHeapLoader::_narrow_oop_base;
int AOTMappedHeapLoader::_narrow_oop_shift;

// Support for loaded heap.
uintptr_t AOTMappedHeapLoader::_loaded_heap_bottom = 0;
uintptr_t AOTMappedHeapLoader::_loaded_heap_top = 0;
uintptr_t AOTMappedHeapLoader::_dumptime_base = UINTPTR_MAX;
uintptr_t AOTMappedHeapLoader::_dumptime_top = 0;
intx AOTMappedHeapLoader::_runtime_offset = 0;
bool AOTMappedHeapLoader::_loading_failed = false;

// Support for mapped heap.
uintptr_t AOTMappedHeapLoader::_mapped_heap_bottom = 0;
bool AOTMappedHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t AOTMappedHeapLoader::_mapped_heap_delta = 0;

// Heap roots
GrowableArrayCHeap<OopHandle, mtClassShared>* AOTMappedHeapLoader::_root_segments = nullptr;
int AOTMappedHeapLoader::_root_segment_max_size_elems;

MemRegion AOTMappedHeapLoader::_mapped_heap_memregion;
bool AOTMappedHeapLoader::_heap_pointers_need_patching;

// Every mapped region is offset by _mapped_heap_delta from its requested address.
// See FileMapInfo::heap_region_requested_address().
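// A dump-time narrowOop can then be decoded directly at runtime by using a
// compensated base (CompressedOops::base() + delta) together with the dump-time
// shift; see decode_from_mapped_archive(). Illustrative example: with a runtime
// base of 0x4000, delta = 0x1000 and shift 0, the dump-time narrowOop 0x1200
// decodes to 0x4000 + 0x1000 + 0x1200 = 0x6200, the object's actual mapped
// address.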
ATTRIBUTE_NO_UBSAN
void AOTMappedHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
  assert(!_mapped_heap_relocation_initialized, "only once");
  if (!UseCompressedOops) {
    assert(dumptime_oop_shift == 0, "sanity");
  }
  assert(can_map(), "sanity");
  init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
  _mapped_heap_bottom = (intptr_t)mapped_heap_bottom;
  _mapped_heap_delta = delta;
  _mapped_heap_relocation_initialized = true;
}

void AOTMappedHeapLoader::init_narrow_oop_decoding(address base, int shift) {
  assert(!_narrow_oop_base_initialized, "only once");
  _narrow_oop_base_initialized = true;
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

void AOTMappedHeapLoader::fixup_region() {
  FileMapInfo* mapinfo = FileMapInfo::current_info();
  if (is_mapped()) {
    fixup_mapped_heap_region(mapinfo);
  } else if (_loading_failed) {
    fill_failed_loaded_heap();
  }
}

// ------------------ Support for Region MAPPING -----------------------------------------

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchCompressedEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = AOTMappedHeapLoader::decode_from_mapped_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

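// Faster variant of the above: when the dump-time and runtime narrow-oop shifts
// agree, relocation reduces to adding a single 32-bit delta to every non-null
// narrowOop, with no decode/re-encode round trip. Illustrative example: if the
// encoded region bottom moves from 0x1200 (dump time) to 0x5200 (runtime), then
// delta = 0x4000 and a stored narrowOop 0x1234 is rewritten as 0x5234.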
class PatchCompressedEmbeddedPointersQuick: public BitMapClosure {
  narrowOop* _start;
  uint32_t _delta;

 public:
  PatchCompressedEmbeddedPointersQuick(narrowOop* start, uint32_t delta) : _start(start), _delta(delta) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    narrowOop new_v = CompressedOops::narrow_oop_cast(CompressedOops::narrow_oop_value(v) + _delta);
    assert(!CompressedOops::is_null(new_v), "should never relocate to narrowOop(0)");
#ifdef ASSERT
    oop o1 = AOTMappedHeapLoader::decode_from_mapped_archive(v);
    oop o2 = CompressedOops::decode_not_null(new_v);
    assert(o1 == o2, "quick delta must work");
#endif
    RawAccess<IS_NOT_NULL>::oop_store(p, new_v);
    return true;
  }
};

class PatchUncompressedEmbeddedPointers: public BitMapClosure {
  oop* _start;
  intptr_t _delta;

 public:
  PatchUncompressedEmbeddedPointers(oop* start, intx runtime_offset) :
    _start(start),
    _delta(runtime_offset) {}

  PatchUncompressedEmbeddedPointers(oop* start) :
    _start(start),
    _delta(AOTMappedHeapLoader::mapped_heap_delta()) {}

  bool do_bit(size_t offset) {
    oop* p = _start + offset;
    intptr_t dumptime_oop = (intptr_t)((void*)*p);
    assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
    intptr_t runtime_oop = dumptime_oop + _delta;
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
    return true;
  }
};

void AOTMappedHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
                                                             FileMapInfo* info,
                                                             MemRegion region) {
  narrowOop dt_encoded_bottom = encoded_heap_region_dumptime_address(info);
  narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start()));
  log_info(aot)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x",
                (uint)dt_encoded_bottom, (uint)rt_encoded_bottom);

  // Optimization: if dumptime shift is the same as runtime shift, we can perform a
  // quick conversion from "dumptime narrowOop" -> "runtime narrowOop".
  narrowOop* patching_start = (narrowOop*)region.start() + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos();
  if (_narrow_oop_shift == CompressedOops::shift()) {
    uint32_t quick_delta = (uint32_t)rt_encoded_bottom - (uint32_t)dt_encoded_bottom;
    log_info(aot)("heap data relocation quick delta = 0x%x", quick_delta);
    if (quick_delta == 0) {
      log_info(aot)("heap data relocation unnecessary, quick_delta = 0");
    } else {
      PatchCompressedEmbeddedPointersQuick patcher(patching_start, quick_delta);
      bm.iterate(&patcher);
    }
  } else {
    log_info(aot)("heap data quick relocation not possible");
    PatchCompressedEmbeddedPointers patcher(patching_start);
    bm.iterate(&patcher);
  }
}

// Patch all the non-null pointers that are embedded in the archived heap objects
// in this (mapped) region.
void AOTMappedHeapLoader::patch_embedded_pointers(FileMapInfo* info,
                                                  MemRegion region, address oopmap,
                                                  size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
  if (UseCompressedOops) {
    patch_compressed_embedded_pointers(bm, info, region);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)region.start() + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos());
    bm.iterate(&patcher);
  }
}

// ------------------ Support for Region LOADING -----------------------------------------

// The CDS archive remembers each heap object by its address at dump time, but
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses.
struct LoadedArchiveHeapRegion {
  int _region_index;        // index for FileMapInfo::space_at(index)
  size_t _region_size;      // number of bytes in this region
  uintptr_t _dumptime_base; // The dump-time (decoded) address of the first object in this region
  intx _runtime_offset;     // If an object's dump time address P is within this region, its
                            // runtime address is P + _runtime_offset
  uintptr_t top() {
    return _dumptime_base + _region_size;
  }
};

void AOTMappedHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) {
  _dumptime_base = loaded_region->_dumptime_base;
  _dumptime_top = loaded_region->top();
  _runtime_offset = loaded_region->_runtime_offset;
}

bool AOTMappedHeapLoader::can_load() {
  return Universe::heap()->can_load_archived_objects();
}

class AOTMappedHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
  narrowOop* _start;
  intx _offset;
  uintptr_t _base;
  uintptr_t _top;

 public:
  PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region)
    : _start(start),
      _offset(loaded_region->_runtime_offset),
      _base(loaded_region->_dumptime_base),
      _top(loaded_region->top()) {}

  bool do_bit(size_t offset) {
    assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    uintptr_t o = cast_from_oop<uintptr_t>(AOTMappedHeapLoader::decode_from_archive(v));
    assert(_base <= o && o < _top, "must be");

    o += _offset;
    AOTMappedHeapLoader::assert_in_loaded_heap(o);
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
    return true;
  }
};

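// Size the archived heap region and allocate a buffer for it inside the runtime
// heap. Dump-time addresses are then translated by a single offset:
// _runtime_offset = (runtime buffer bottom) - (dump-time region bottom), so a
// dump-time address P maps to P + _runtime_offset (see LoadedArchiveHeapRegion).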
bool AOTMappedHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                             MemRegion& archive_space) {
  size_t total_bytes = 0;
  FileMapRegion* r = mapinfo->region_at(AOTMetaspace::hp);
  r->assert_is_heap_region();
  if (r->used() == 0) {
    return false;
  }

  assert(is_aligned(r->used(), HeapWordSize), "must be");
  total_bytes += r->used();
  loaded_region->_region_index = AOTMetaspace::hp;
  loaded_region->_region_size = r->used();
  loaded_region->_dumptime_base = (uintptr_t)heap_region_dumptime_address(mapinfo);

  assert(is_aligned(total_bytes, HeapWordSize), "must be");
  size_t word_size = total_bytes / HeapWordSize;
  HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
  if (buffer == nullptr) {
    return false;
  }

  archive_space = MemRegion(buffer, word_size);
  _loaded_heap_bottom = (uintptr_t)archive_space.start();
  _loaded_heap_top = _loaded_heap_bottom + total_bytes;

  loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base;

  return true;
}

bool AOTMappedHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                                uintptr_t load_address) {
  uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
  if (bitmap_base == 0) {
    _loading_failed = true;
    return false; // OOM or CRC error
  }

  FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index);
  if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
    // There's no easy way to free the buffer, so we will fill it with zero later
    // in fill_failed_loaded_heap(), and it will eventually be GC'ed.
    log_warning(aot)("Loading of heap region %d has failed. Archived objects are disabled", loaded_region->_region_index);
    _loading_failed = true;
    return false;
  }
  assert(r->mapped_base() == (char*)load_address, "sanity");
  log_info(aot)("Loaded heap region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
                " size %6zu delta %zd",
                loaded_region->_region_index, load_address, load_address + loaded_region->_region_size,
                loaded_region->_region_size, loaded_region->_runtime_offset);

  uintptr_t oopmap = bitmap_base + r->oopmap_offset();
  BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());

  if (UseCompressedOops) {
    PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos(), loaded_region);
    bm.iterate(&patcher);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)load_address + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos(), loaded_region->_runtime_offset);
    bm.iterate(&patcher);
  }
  return true;
}

bool AOTMappedHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
  assert(can_load(), "loading the archived heap must be supported");
  init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());

  LoadedArchiveHeapRegion loaded_region;
  memset(&loaded_region, 0, sizeof(loaded_region));

  MemRegion archive_space;
  if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) {
    return false;
  }

  if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) {
    assert(_loading_failed, "must be");
    return false;
  }

  init_loaded_heap_relocation(&loaded_region);
  _is_loaded = true;

  return true;
}

objArrayOop AOTMappedHeapLoader::root_segment(int segment_idx) {
  if (CDSConfig::is_dumping_heap()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  } else {
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}

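// Map a flat root index to (segment index, index within segment). Illustrative
// example: with _root_segment_max_size_elems == 1000, idx == 2345 yields
// seg_idx == 2 and int_idx == 345.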
void AOTMappedHeapLoader::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
  assert(_root_segment_max_size_elems > 0, "sanity");

  // Try to avoid divisions for the common case.
  if (idx < _root_segment_max_size_elems) {
    seg_idx = 0;
    int_idx = idx;
  } else {
    seg_idx = idx / _root_segment_max_size_elems;
    int_idx = idx % _root_segment_max_size_elems;
  }

  assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
         "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}

void AOTMappedHeapLoader::add_root_segment(objArrayOop segment_oop) {
  assert(segment_oop != nullptr, "must be");
  assert(is_in_use(), "must be");
  if (_root_segments == nullptr) {
    _root_segments = new GrowableArrayCHeap<OopHandle, mtClassShared>(10);
  }
  _root_segments->push(OopHandle(Universe::vm_global(), segment_oop));
}

void AOTMappedHeapLoader::init_root_segment_sizes(int max_size_elems) {
  _root_segment_max_size_elems = max_size_elems;
}

oop AOTMappedHeapLoader::get_root(int index) {
  assert(!_root_segments->is_empty(), "must have loaded shared heap");
  int seg_idx, int_idx;
  get_segment_indexes(index, seg_idx, int_idx);
  objArrayOop result = objArrayOop(root_segment(seg_idx));
  return result->obj_at(int_idx);
}

void AOTMappedHeapLoader::clear_root(int index) {
  int seg_idx, int_idx;
  get_segment_indexes(index, seg_idx, int_idx);
  root_segment(seg_idx)->obj_at_put(int_idx, nullptr);
}

class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
  HashTable<uintptr_t, bool>* _table;

 public:
  VerifyLoadedHeapEmbeddedPointers(HashTable<uintptr_t, bool>* table) : _table(table) {}

  virtual void do_oop(narrowOop* p) {
    // This should be called before the loaded region is modified, so all the embedded pointers
    // must be null, or must point to a valid object in the loaded region.
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      oop o = CompressedOops::decode_not_null(v);
      uintptr_t u = cast_from_oop<uintptr_t>(o);
      AOTMappedHeapLoader::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
    }
  }
  virtual void do_oop(oop* p) {
    oop v = *p;
    if (v != nullptr) {
      uintptr_t u = cast_from_oop<uintptr_t>(v);
      AOTMappedHeapLoader::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
    }
  }
};

void AOTMappedHeapLoader::finish_initialization(FileMapInfo* info) {
  patch_heap_embedded_pointers(info);

  if (is_loaded()) {
    // These operations are needed only when the heap is loaded (not mapped).
    finish_loaded_heap();
    if (VerifyArchivedFields > 0) {
      verify_loaded_heap();
    }
  }
  if (is_in_use()) {
    patch_native_pointers();
    intptr_t bottom = is_loaded() ? _loaded_heap_bottom : _mapped_heap_bottom;

    // The heap roots are stored in one or more segments that are laid out consecutively.
    // The size of each segment (except for the last one) is max_size_in_{elems,bytes}.
    HeapRootSegments segments = FileMapInfo::current_info()->mapped_heap()->root_segments();
    init_root_segment_sizes(segments.max_size_in_elems());
    intptr_t first_segment_addr = bottom + segments.base_offset();
    for (size_t c = 0; c < segments.count(); c++) {
      oop segment_oop = cast_to_oop(first_segment_addr + (c * segments.max_size_in_bytes()));
      assert(segment_oop->is_objArray(), "Must be");
      add_root_segment((objArrayOop)segment_oop);
    }

    StringTable::load_shared_strings_array();
  }
}

void AOTMappedHeapLoader::finish_loaded_heap() {
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top = (HeapWord*)_loaded_heap_top;

  MemRegion archive_space = MemRegion(bottom, top);
  Universe::heap()->complete_loaded_archive_space(archive_space);
}

void AOTMappedHeapLoader::verify_loaded_heap() {
  log_info(aot, heap)("Verify all oops and pointers in loaded heap");

  ResourceMark rm;
  HashTable<uintptr_t, bool> table;
  VerifyLoadedHeapEmbeddedPointers verifier(&table);
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top = (HeapWord*)_loaded_heap_top;

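  // Pass 1: record the start address of every object in the loaded region.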
  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    table.put(cast_from_oop<uintptr_t>(o), true);
    p += o->size();
  }

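  // Pass 2: check that every embedded pointer refers to one of the object
  // starts recorded in pass 1.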
  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&verifier);
    p += o->size();
  }
}

void AOTMappedHeapLoader::fill_failed_loaded_heap() {
  assert(_loading_failed, "must be");
  if (_loaded_heap_bottom != 0) {
    assert(_loaded_heap_top != 0, "must be");
    HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
    HeapWord* top = (HeapWord*)_loaded_heap_top;
    Universe::heap()->fill_with_objects(bottom, top - bottom);
  }
}

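// Archived heap objects may embed native pointers (Metadata*, e.g. the Klass of
// a java.lang.Class mirror) into the AOT metaspace. If the metaspace was mapped
// at a different address than requested, every such slot (marked in the ptrmap)
// must be shifted by the same relocation delta.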
class PatchNativePointers: public BitMapClosure {
  Metadata** _start;

 public:
  PatchNativePointers(Metadata** start) : _start(start) {}

  bool do_bit(size_t offset) {
    Metadata** p = _start + offset;
    *p = (Metadata*)(address(*p) + AOTMetaspace::relocation_delta());
    return true;
  }
};

void AOTMappedHeapLoader::patch_native_pointers() {
  if (AOTMetaspace::relocation_delta() == 0) {
    return;
  }

  FileMapRegion* r = FileMapInfo::current_info()->region_at(AOTMetaspace::hp);
  if (r->mapped_base() != nullptr && r->has_ptrmap()) {
    log_info(aot, heap)("Patching native pointers in heap region");
    BitMapView bm = FileMapInfo::current_info()->ptrmap_view(AOTMetaspace::hp);
    PatchNativePointers patcher((Metadata**)r->mapped_base() + FileMapInfo::current_info()->mapped_heap()->ptrmap_start_pos());
    bm.iterate(&patcher);
  }
}

// The actual address of this region during dump time.
address AOTMappedHeapLoader::heap_region_dumptime_address(FileMapInfo* info) {
  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  assert(CDSConfig::is_using_archive(), "runtime only");
  assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
  if (UseCompressedOops) {
    return /*dumptime*/ (address)((uintptr_t)info->narrow_oop_base() + r->mapping_offset());
  } else {
    return heap_region_requested_address(info);
  }
}

// The address where this region can be mapped into the runtime heap without
// patching any of the pointers that are embedded in this region.
address AOTMappedHeapLoader::heap_region_requested_address(FileMapInfo* info) {
  assert(CDSConfig::is_using_archive(), "runtime only");
  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
  assert(can_use(), "cannot be used by AOTMappedHeapLoader::can_load() mode");
  if (UseCompressedOops) {
    // We can avoid relocation if each region's offset from the runtime CompressedOops::base()
    // is the same as its offset from the CompressedOops::base() during dumptime.
    // Note that CompressedOops::base() may be different between dumptime and runtime.
    //
    // Example:
    // Dumptime base = 0x1000 and shift is 0. We have a region at address 0x2000. There's a
    // narrowOop P stored in this region that points to an object at address 0x2200.
    // P's encoded value is 0x1200.
    //
    // Runtime base = 0x4000 and shift is also 0. If we map this region at 0x5000, then
    // the value P can remain 0x1200. The decoded address = (0x4000 + (0x1200 << 0)) = 0x5200,
    // which is the runtime location of the referenced object.
    return /*runtime*/ (address)((uintptr_t)CompressedOops::base() + r->mapping_offset());
  } else {
    // This was the hard-coded requested base address used at dump time. With uncompressed oops,
    // the heap range is assigned by the OS so we will most likely have to relocate anyway, no matter
    // what base address was picked at dump time.
    return (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
  }
}

bool AOTMappedHeapLoader::map_heap_region(FileMapInfo* info) {
  if (map_heap_region_impl(info)) {
#ifdef ASSERT
    // The "old" regions must be parsable -- we cannot have any unused space
    // at the start of the lowest G1 region that contains archived objects.
    assert(is_aligned(_mapped_heap_memregion.start(), G1HeapRegion::GrainBytes), "must be");

    // Make sure we map at the very top of the heap - see comments in
    // init_heap_region_relocation().
    MemRegion heap_range = G1CollectedHeap::heap()->reserved();
    assert(heap_range.contains(_mapped_heap_memregion), "must be");

    address heap_end = (address)heap_range.end();
    address mapped_heap_region_end = (address)_mapped_heap_memregion.end();
    assert(heap_end >= mapped_heap_region_end, "must be");
    assert(heap_end - mapped_heap_region_end < (intx)(G1HeapRegion::GrainBytes),
           "must be at the top of the heap to avoid fragmentation");
#endif

    set_mapped();
    return true;
  } else {
    return false;
  }
}

bool AOTMappedHeapLoader::map_heap_region_impl(FileMapInfo* info) {
  assert(UseG1GC, "the following code assumes G1");

  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  size_t size = r->used();
  if (size == 0) {
    return false; // no archived java heap data
  }

  size_t word_size = size / HeapWordSize;
  address requested_start = heap_region_requested_address(info);

  aot_log_info(aot)("Preferred address to map heap data (to avoid relocation) is " INTPTR_FORMAT, p2i(requested_start));

  // allocate from java heap
  HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size, (HeapWord*)requested_start);
  if (start == nullptr) {
    AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to allocate java heap region for archive heap.");
    return false;
  }

  _mapped_heap_memregion = MemRegion(start, word_size);

  // Map the archived heap data. No need to call MemTracker::record_virtual_memory_tag()
  // for the mapped region as it is part of the reserved java heap, which is already recorded.
  char* addr = (char*)_mapped_heap_memregion.start();
  char* base;

  if (AOTMetaspace::use_windows_memory_mapping() || UseLargePages) {
    // With UseLargePages, memory mapping may fail on some OSes if the size is not
    // large page aligned, so let's use read() instead. In this case, the memory region
    // is already committed by G1 so we don't need to commit it again.
    if (!info->read_region(AOTMetaspace::hp, addr,
                           align_up(_mapped_heap_memregion.byte_size(), os::vm_page_size()),
                           /* do_commit = */ !UseLargePages)) {
      dealloc_heap_region(info);
      aot_log_error(aot)("Failed to read archived heap region into " INTPTR_FORMAT, p2i(addr));
      return false;
    }
    // The check for VerifySharedSpaces is already done inside read_region().
    base = addr;
  } else {
    base = info->map_heap_region(r, addr, _mapped_heap_memregion.byte_size());
    if (base == nullptr || base != addr) {
      dealloc_heap_region(info);
      AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to map at required address in java heap. "
                                         INTPTR_FORMAT ", size = %zu bytes",
                                         p2i(addr), _mapped_heap_memregion.byte_size());
      return false;
    }

    if (VerifySharedSpaces && !r->check_region_crc(base)) {
      dealloc_heap_region(info);
      AOTMetaspace::report_loading_error("UseSharedSpaces: mapped heap region is corrupt");
      return false;
    }
  }

  r->set_mapped_base(base);

  // If the requested range is different from the range allocated by GC, then
  // the pointers need to be patched.
  address mapped_start = (address) _mapped_heap_memregion.start();
  ptrdiff_t delta = mapped_start - requested_start;
  if (UseCompressedOops &&
      (info->narrow_oop_mode() != CompressedOops::mode() ||
       info->narrow_oop_shift() != CompressedOops::shift())) {
    _heap_pointers_need_patching = true;
  }
  if (delta != 0) {
    _heap_pointers_need_patching = true;
  }
  init_mapped_heap_info(mapped_start, delta, info->narrow_oop_shift());

  if (_heap_pointers_need_patching) {
    char* bitmap_base = info->map_bitmap_region();
    if (bitmap_base == nullptr) {
      AOTMetaspace::report_loading_error("CDS heap cannot be used because bitmap region cannot be mapped");
      dealloc_heap_region(info);
      _heap_pointers_need_patching = false;
      return false;
    }
  }
  aot_log_info(aot)("Heap data mapped at " INTPTR_FORMAT ", size = %8zu bytes",
                    p2i(mapped_start), _mapped_heap_memregion.byte_size());
  aot_log_info(aot)("CDS heap data relocation delta = %zd bytes", delta);
  return true;
}

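// The narrowOop encoding of the region bottom at dump time. mapping_offset is the
// region's byte offset from the narrow-oop encoding base, so the encoded value is
// simply that offset shifted right by the dump-time narrow-oop shift.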
narrowOop AOTMappedHeapLoader::encoded_heap_region_dumptime_address(FileMapInfo* info) {
  assert(CDSConfig::is_using_archive(), "runtime only");
  assert(UseCompressedOops, "sanity");
  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  return CompressedOops::narrow_oop_cast(r->mapping_offset() >> info->narrow_oop_shift());
}

void AOTMappedHeapLoader::patch_heap_embedded_pointers(FileMapInfo* info) {
  if (!info->is_mapped() || !_heap_pointers_need_patching) {
    return;
  }

  char* bitmap_base = info->map_bitmap_region();
  assert(bitmap_base != nullptr, "must have already been mapped");

  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  patch_embedded_pointers(
    info, _mapped_heap_memregion,
    (address)(info->region_at(AOTMetaspace::bm)->mapped_base()) + r->oopmap_offset(),
    r->oopmap_size_in_bits());
}

void AOTMappedHeapLoader::fixup_mapped_heap_region(FileMapInfo* info) {
  if (is_mapped()) {
    assert(!_mapped_heap_memregion.is_empty(), "sanity");

    // Populate the archive regions' G1BlockOffsetTables. That ensures
    // fast G1BlockOffsetTable::block_start operations for any given address
    // within the archive regions when trying to find the start of an object
    // (e.g. during card table scanning).
    G1CollectedHeap::heap()->populate_archive_regions_bot(_mapped_heap_memregion);
  }
}

// dealloc the archive regions from java heap
void AOTMappedHeapLoader::dealloc_heap_region(FileMapInfo* info) {
  G1CollectedHeap::heap()->dealloc_archive_regions(_mapped_heap_memregion);
}

AOTMapLogger::OopDataIterator* AOTMappedHeapLoader::oop_iterator(FileMapInfo* info, address buffer_start, address buffer_end) {
  class MappedLoaderOopIterator : public AOTMapLogger::OopDataIterator {
   private:
    address _current;
    address _next;

    address _buffer_start;
    address _buffer_end;
    uint64_t _buffer_start_narrow_oop;
    intptr_t _buffer_to_requested_delta;
    int _requested_shift;

    size_t _num_root_segments;
    size_t _num_obj_arrays_logged;

   public:
    MappedLoaderOopIterator(address buffer_start,
                            address buffer_end,
                            uint64_t buffer_start_narrow_oop,
                            intptr_t buffer_to_requested_delta,
                            int requested_shift,
                            size_t num_root_segments)
      : _current(nullptr),
        _next(buffer_start),
        _buffer_start(buffer_start),
        _buffer_end(buffer_end),
        _buffer_start_narrow_oop(buffer_start_narrow_oop),
        _buffer_to_requested_delta(buffer_to_requested_delta),
        _requested_shift(requested_shift),
        _num_root_segments(num_root_segments),
        _num_obj_arrays_logged(0) {
    }

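    // Reconstruct the logging view of one buffered object: its requested (runtime)
    // address is the buffered address plus a fixed delta, and its narrowOop value
    // is the encoded buffer bottom plus the object's byte offset shifted right by
    // the requested shift.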
    AOTMapLogger::OopData capture(address buffered_addr) {
      oopDesc* raw_oop = (oopDesc*)buffered_addr;
      size_t size = raw_oop->size();
      address requested_addr = buffered_addr + _buffer_to_requested_delta;
      intptr_t target_location = intptr_t(requested_addr);
      uint64_t pd = (uint64_t)(pointer_delta(buffered_addr, _buffer_start, 1));
      uint32_t narrow_location = checked_cast<uint32_t>(_buffer_start_narrow_oop + (pd >> _requested_shift));
      Klass* klass = raw_oop->klass();

      return { buffered_addr,
               requested_addr,
               target_location,
               narrow_location,
               raw_oop,
               klass,
               size,
               false };
    }

    bool has_next() override {
      return _next < _buffer_end;
    }

    AOTMapLogger::OopData next() override {
      _current = _next;
      AOTMapLogger::OopData result = capture(_current);
      if (result._klass->is_objArray_klass()) {
        result._is_root_segment = _num_obj_arrays_logged++ < _num_root_segments;
      }
      _next = _current + result._size * BytesPerWord;
      return result;
    }

    AOTMapLogger::OopData obj_at(narrowOop* addr) override {
      uint64_t n = (uint64_t)(*addr);
      if (n == 0) {
        return null_data();
      } else {
        precond(n >= _buffer_start_narrow_oop);
        address buffer_addr = _buffer_start + ((n - _buffer_start_narrow_oop) << _requested_shift);
        return capture(buffer_addr);
      }
    }

    AOTMapLogger::OopData obj_at(oop* addr) override {
      address requested_value = cast_from_oop<address>(*addr);
      if (requested_value == nullptr) {
        return null_data();
      } else {
        address buffer_addr = requested_value - _buffer_to_requested_delta;
        return capture(buffer_addr);
      }
    }

    GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
      return new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();
    }
  };

  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  address requested_base = UseCompressedOops ? (address)info->narrow_oop_base() : heap_region_requested_address(info);
  address requested_start = requested_base + r->mapping_offset();
  int requested_shift = info->narrow_oop_shift();
  intptr_t buffer_to_requested_delta = requested_start - buffer_start;
  uint64_t buffer_start_narrow_oop = 0xdeadbeed;
  if (UseCompressedOops) {
    buffer_start_narrow_oop = (uint64_t)(pointer_delta(requested_start, requested_base, 1)) >> requested_shift;
    assert(buffer_start_narrow_oop < 0xffffffff, "sanity");
  }

  return new MappedLoaderOopIterator(buffer_start,
                                     buffer_end,
                                     buffer_start_narrow_oop,
                                     buffer_to_requested_delta,
                                     requested_shift,
                                     info->mapped_heap()->root_segments().count());
}

#endif // INCLUDE_CDS_JAVA_HEAP