/*
 * Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotLogging.hpp"
#include "cds/aotMappedHeap.hpp"
#include "cds/aotMappedHeapLoader.inline.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "logging/logTag.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "sanitizers/ub.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool AOTMappedHeapLoader::_is_mapped = false;
bool AOTMappedHeapLoader::_is_loaded = false;

bool    AOTMappedHeapLoader::_narrow_oop_base_initialized = false;
address AOTMappedHeapLoader::_narrow_oop_base;
int     AOTMappedHeapLoader::_narrow_oop_shift;

// Support for loaded heap.
uintptr_t AOTMappedHeapLoader::_loaded_heap_bottom = 0;
uintptr_t AOTMappedHeapLoader::_loaded_heap_top = 0;
uintptr_t AOTMappedHeapLoader::_dumptime_base = UINTPTR_MAX;
uintptr_t AOTMappedHeapLoader::_dumptime_top = 0;
intx AOTMappedHeapLoader::_runtime_offset = 0;
bool AOTMappedHeapLoader::_loading_failed = false;

// Support for mapped heap.
uintptr_t AOTMappedHeapLoader::_mapped_heap_bottom = 0;
bool      AOTMappedHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t AOTMappedHeapLoader::_mapped_heap_delta = 0;

// Heap roots
GrowableArrayCHeap<OopHandle, mtClassShared>* AOTMappedHeapLoader::_root_segments = nullptr;
int AOTMappedHeapLoader::_root_segment_max_size_elems;

MemRegion AOTMappedHeapLoader::_mapped_heap_memregion;
bool AOTMappedHeapLoader::_heap_pointers_need_patching;

// Every mapped region is offset by _mapped_heap_delta from its requested address.
// See FileMapInfo::heap_region_requested_address().
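//
// Illustrative example (made-up addresses): if the region was requested at
// 0x00000007f0000000 but ended up mapped at 0x00000007f2000000, then
// _mapped_heap_delta = 0x2000000, and a dumptime address D inside the region
// corresponds to the runtime address D + 0x2000000.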
ATTRIBUTE_NO_UBSAN
void AOTMappedHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
  assert(!_mapped_heap_relocation_initialized, "only once");
  if (!UseCompressedOops) {
    assert(dumptime_oop_shift == 0, "sanity");
  }
  assert(can_map(), "sanity");
  init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
  _mapped_heap_bottom = (intptr_t)mapped_heap_bottom;
  _mapped_heap_delta = delta;
  _mapped_heap_relocation_initialized = true;
}

void AOTMappedHeapLoader::init_narrow_oop_decoding(address base, int shift) {
  assert(!_narrow_oop_base_initialized, "only once");
  _narrow_oop_base_initialized = true;
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

void AOTMappedHeapLoader::fixup_region() {
  FileMapInfo* mapinfo = FileMapInfo::current_info();
  if (is_mapped()) {
    fixup_mapped_heap_region(mapinfo);
  } else if (_loading_failed) {
    fill_failed_loaded_heap();
  }
}

// ------------------ Support for Region MAPPING -----------------------------------------

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
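// A set bit at position i in the region's oopmap marks the i-th oop slot
// (counting from the oopmap start position) as holding a non-null archived
// reference; only those slots are visited and re-encoded by the closures below.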
class PatchCompressedEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = AOTMappedHeapLoader::decode_from_mapped_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

class PatchCompressedEmbeddedPointersQuick: public BitMapClosure {
  narrowOop* _start;
  uint32_t _delta;

 public:
  PatchCompressedEmbeddedPointersQuick(narrowOop* start, uint32_t delta) : _start(start), _delta(delta) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    narrowOop new_v = CompressedOops::narrow_oop_cast(CompressedOops::narrow_oop_value(v) + _delta);
    assert(!CompressedOops::is_null(new_v), "should never relocate to narrowOop(0)");
#ifdef ASSERT
    oop o1 = AOTMappedHeapLoader::decode_from_mapped_archive(v);
    oop o2 = CompressedOops::decode_not_null(new_v);
    assert(o1 == o2, "quick delta must work");
#endif
    RawAccess<IS_NOT_NULL>::oop_store(p, new_v);
    return true;
  }
};

class PatchUncompressedEmbeddedPointers: public BitMapClosure {
  oop* _start;
  intptr_t _delta;

 public:
  PatchUncompressedEmbeddedPointers(oop* start, intx runtime_offset) :
    _start(start),
    _delta(runtime_offset) {}

  PatchUncompressedEmbeddedPointers(oop* start) :
    _start(start),
    _delta(AOTMappedHeapLoader::mapped_heap_delta()) {}

  bool do_bit(size_t offset) {
    oop* p = _start + offset;
    intptr_t dumptime_oop = (intptr_t)((void*)*p);
    assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
    intptr_t runtime_oop = dumptime_oop + _delta;
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
    return true;
  }
};

void AOTMappedHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
                                                             FileMapInfo* info,
                                                             MemRegion region) {
  narrowOop dt_encoded_bottom = encoded_heap_region_dumptime_address(info);
  narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start()));
  log_info(aot)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x",
                  (uint)dt_encoded_bottom, (uint)rt_encoded_bottom);

  // Optimization: if dumptime shift is the same as runtime shift, we can perform a
  // quick conversion from "dumptime narrowOop" -> "runtime narrowOop".
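  //
  // Illustrative example (assumed values): with a common shift of 0,
  // dt_encoded_bottom = 0x1200 and rt_encoded_bottom = 0x5200 give
  // quick_delta = 0x4000, so every non-null narrowOop in the region can be
  // relocated by simply adding 0x4000 to its 32-bit value.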
  narrowOop* patching_start = (narrowOop*)region.start() + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos();
  if (_narrow_oop_shift == CompressedOops::shift()) {
    uint32_t quick_delta = (uint32_t)rt_encoded_bottom - (uint32_t)dt_encoded_bottom;
    log_info(aot)("heap data relocation quick delta = 0x%x", quick_delta);
    if (quick_delta == 0) {
      log_info(aot)("heap data relocation unnecessary, quick_delta = 0");
    } else {
      PatchCompressedEmbeddedPointersQuick patcher(patching_start, quick_delta);
      bm.iterate(&patcher);
    }
  } else {
    log_info(aot)("heap data quick relocation not possible");
    PatchCompressedEmbeddedPointers patcher(patching_start);
    bm.iterate(&patcher);
  }
}

// Patch all the non-null pointers that are embedded in the archived heap objects
// in this (mapped) region
void AOTMappedHeapLoader::patch_embedded_pointers(FileMapInfo* info,
                                                  MemRegion region, address oopmap,
                                                  size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
  if (UseCompressedOops) {
    patch_compressed_embedded_pointers(bm, info, region);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)region.start() + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos());
    bm.iterate(&patcher);
  }
}

// ------------------ Support for Region LOADING -----------------------------------------

// The CDS archive remembers each heap object by its address at dump time, but
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses.
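//
// Illustrative example (made-up addresses): with _dumptime_base = 0x2000,
// _region_size = 0x1000 and _runtime_offset = 0x3000, an object archived at
// dump time address 0x2200 falls within [0x2000, 0x3000) and is therefore
// found at 0x2200 + 0x3000 = 0x5200 at run time.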
struct AOTMappedHeapRegion {
  int       _region_index;   // index for FileMapInfo::space_at(index)
  size_t    _region_size;    // number of bytes in this region
  uintptr_t _dumptime_base;  // The dump-time (decoded) address of the first object in this region
  intx      _runtime_offset; // If an object's dump time address P is within this region, its
                             // runtime address is P + _runtime_offset
  uintptr_t top() {
    return _dumptime_base + _region_size;
  }
};

void AOTMappedHeapLoader::init_loaded_heap_relocation(AOTMappedHeapRegion* loaded_region) {
  _dumptime_base = loaded_region->_dumptime_base;
  _dumptime_top = loaded_region->top();
  _runtime_offset = loaded_region->_runtime_offset;
}

bool AOTMappedHeapLoader::can_load() {
  return Universe::heap()->can_load_archived_objects();
}

class AOTMappedHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
  narrowOop* _start;
  intx _offset;
  uintptr_t _base;
  uintptr_t _top;

 public:
  PatchLoadedRegionPointers(narrowOop* start, AOTMappedHeapRegion* loaded_region)
    : _start(start),
      _offset(loaded_region->_runtime_offset),
      _base(loaded_region->_dumptime_base),
      _top(loaded_region->top()) {}

  bool do_bit(size_t offset) {
    assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    uintptr_t o = cast_from_oop<uintptr_t>(AOTMappedHeapLoader::decode_from_archive(v));
    assert(_base <= o && o < _top, "must be");

    o += _offset;
    AOTMappedHeapLoader::assert_in_loaded_heap(o);
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
    return true;
  }
};

bool AOTMappedHeapLoader::init_loaded_region(FileMapInfo* mapinfo, AOTMappedHeapRegion* loaded_region,
                                             MemRegion& archive_space) {
  size_t total_bytes = 0;
  FileMapRegion* r = mapinfo->region_at(AOTMetaspace::hp);
  r->assert_is_heap_region();
  if (r->used() == 0) {
    return false;
  }

  assert(is_aligned(r->used(), HeapWordSize), "must be");
  total_bytes += r->used();
  loaded_region->_region_index = AOTMetaspace::hp;
  loaded_region->_region_size = r->used();
  loaded_region->_dumptime_base = (uintptr_t)heap_region_dumptime_address(mapinfo);

  assert(is_aligned(total_bytes, HeapWordSize), "must be");
  size_t word_size = total_bytes / HeapWordSize;
  HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
  if (buffer == nullptr) {
    return false;
  }

  archive_space = MemRegion(buffer, word_size);
  _loaded_heap_bottom = (uintptr_t)archive_space.start();
  _loaded_heap_top    = _loaded_heap_bottom + total_bytes;

  loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base;

  return true;
}

bool AOTMappedHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, AOTMappedHeapRegion* loaded_region,
                                                uintptr_t load_address) {
  uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
  if (bitmap_base == 0) {
    _loading_failed = true;
    return false; // OOM or CRC error
  }

  FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index);
  if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
    // There's no easy way to free the buffer, so we will fill it with zero later
    // in fill_failed_loaded_heap(), and it will eventually be GC'ed.
    log_warning(aot)("Loading of heap region %d has failed. Archived objects are disabled", loaded_region->_region_index);
    _loading_failed = true;
    return false;
  }
  assert(r->mapped_base() == (char*)load_address, "sanity");
  log_info(aot)("Loaded heap    region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
                " size %6zu delta %zd",
                loaded_region->_region_index, load_address, load_address + loaded_region->_region_size,
                loaded_region->_region_size, loaded_region->_runtime_offset);

  uintptr_t oopmap = bitmap_base + r->oopmap_offset();
  BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());

  if (UseCompressedOops) {
    PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos(), loaded_region);
    bm.iterate(&patcher);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)load_address + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos(), loaded_region->_runtime_offset);
    bm.iterate(&patcher);
  }
  return true;
}

bool AOTMappedHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
  assert(can_load(), "loaded heap must be supported");
  init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());

  AOTMappedHeapRegion loaded_region;
  memset(&loaded_region, 0, sizeof(loaded_region));

  MemRegion archive_space;
  if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) {
    return false;
  }

  if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) {
    assert(_loading_failed, "must be");
    return false;
  }

  init_loaded_heap_relocation(&loaded_region);
  _is_loaded = true;

  return true;
}

objArrayOop AOTMappedHeapLoader::root_segment(int segment_idx) {
  if (!CDSConfig::is_using_archive()) {
    assert(CDSConfig::is_dumping_heap() && Thread::current() == (Thread*)VMThread::vm_thread(), "sanity");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}

void AOTMappedHeapLoader::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
  assert(_root_segment_max_size_elems > 0, "sanity");

  // Try to avoid divisions for the common case.
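  // Illustrative example (assumed segment size): with
  // _root_segment_max_size_elems == 100, idx == 205 maps to seg_idx == 2 and
  // int_idx == 5, since 205 == 2 * 100 + 5 (see the sanity assert below).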
  if (idx < _root_segment_max_size_elems) {
    seg_idx = 0;
    int_idx = idx;
  } else {
    seg_idx = idx / _root_segment_max_size_elems;
    int_idx = idx % _root_segment_max_size_elems;
  }

  assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
         "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}

void AOTMappedHeapLoader::add_root_segment(objArrayOop segment_oop) {
  assert(segment_oop != nullptr, "must be");
  assert(is_in_use(), "must be");
  if (_root_segments == nullptr) {
    _root_segments = new GrowableArrayCHeap<OopHandle, mtClassShared>(10);
  }
  _root_segments->push(OopHandle(Universe::vm_global(), segment_oop));
}

void AOTMappedHeapLoader::init_root_segment_sizes(int max_size_elems) {
  _root_segment_max_size_elems = max_size_elems;
}

oop AOTMappedHeapLoader::get_root(int index) {
  assert(!_root_segments->is_empty(), "must have loaded shared heap");
  int seg_idx, int_idx;
  get_segment_indexes(index, seg_idx, int_idx);
  objArrayOop result = objArrayOop(root_segment(seg_idx));
  return result->obj_at(int_idx);
}

void AOTMappedHeapLoader::clear_root(int index) {
  int seg_idx, int_idx;
  get_segment_indexes(index, seg_idx, int_idx);
  root_segment(seg_idx)->obj_at_put(int_idx, nullptr);
}

class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
  HashTable<uintptr_t, bool>* _table;

 public:
  VerifyLoadedHeapEmbeddedPointers(HashTable<uintptr_t, bool>* table) : _table(table) {}

  virtual void do_oop(narrowOop* p) {
    // This should be called before the loaded region is modified, so all the embedded pointers
    // must be null, or must point to a valid object in the loaded region.
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      oop o = CompressedOops::decode_not_null(v);
      uintptr_t u = cast_from_oop<uintptr_t>(o);
      AOTMappedHeapLoader::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
    }
  }
  virtual void do_oop(oop* p) {
    oop v = *p;
    if (v != nullptr) {
      uintptr_t u = cast_from_oop<uintptr_t>(v);
      AOTMappedHeapLoader::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
    }
  }
};

void AOTMappedHeapLoader::finish_initialization(FileMapInfo* info) {
  patch_heap_embedded_pointers(info);

  if (is_loaded()) {
    // These operations are needed only when the heap is loaded (not mapped).
    finish_loaded_heap();
    if (VerifyArchivedFields > 0) {
      verify_loaded_heap();
    }
  }
  if (is_in_use()) {
    patch_native_pointers();
    intptr_t bottom = is_loaded() ? _loaded_heap_bottom : _mapped_heap_bottom;

    // The heap roots are stored in one or more segments that are laid out consecutively.
    // The size of each segment (except for the last one) is max_size_in_{elems,bytes}.
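    // Illustrative example (made-up numbers): with base_offset() == 0x100 and
    // max_size_in_bytes() == 0x2000, segment c starts at
    // bottom + 0x100 + c * 0x2000, which is the address computed below.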
    HeapRootSegments segments = FileMapInfo::current_info()->mapped_heap()->root_segments();
    init_root_segment_sizes(segments.max_size_in_elems());
    intptr_t first_segment_addr = bottom + segments.base_offset();
    for (size_t c = 0; c < segments.count(); c++) {
      oop segment_oop = cast_to_oop(first_segment_addr + (c * segments.max_size_in_bytes()));
      assert(segment_oop->is_objArray(), "Must be");
      add_root_segment((objArrayOop)segment_oop);
    }

    if (CDSConfig::is_dumping_final_static_archive()) {
      StringTable::move_shared_strings_into_runtime_table();
    }
  }
}

void AOTMappedHeapLoader::finish_loaded_heap() {
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top    = (HeapWord*)_loaded_heap_top;

  MemRegion archive_space = MemRegion(bottom, top);
  Universe::heap()->complete_loaded_archive_space(archive_space);
}

void AOTMappedHeapLoader::verify_loaded_heap() {
  log_info(aot, heap)("Verify all oops and pointers in loaded heap");

  ResourceMark rm;
  HashTable<uintptr_t, bool> table;
  VerifyLoadedHeapEmbeddedPointers verifier(&table);
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top    = (HeapWord*)_loaded_heap_top;

  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    table.put(cast_from_oop<uintptr_t>(o), true);
    p += o->size();
  }

  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&verifier);
    p += o->size();
  }
}

void AOTMappedHeapLoader::fill_failed_loaded_heap() {
  assert(_loading_failed, "must be");
  if (_loaded_heap_bottom != 0) {
    assert(_loaded_heap_top != 0, "must be");
    HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
    HeapWord* top = (HeapWord*)_loaded_heap_top;
    Universe::heap()->fill_with_objects(bottom, top - bottom);
  }
}

class PatchNativePointers: public BitMapClosure {
  Metadata** _start;

 public:
  PatchNativePointers(Metadata** start) : _start(start) {}

  bool do_bit(size_t offset) {
    Metadata** p = _start + offset;
    *p = (Metadata*)(address(*p) + AOTMetaspace::relocation_delta());
    return true;
  }
};

void AOTMappedHeapLoader::patch_native_pointers() {
  if (AOTMetaspace::relocation_delta() == 0) {
    return;
  }

  FileMapRegion* r = FileMapInfo::current_info()->region_at(AOTMetaspace::hp);
  if (r->mapped_base() != nullptr && r->has_ptrmap()) {
    log_info(aot, heap)("Patching native pointers in heap region");
    BitMapView bm = FileMapInfo::current_info()->ptrmap_view(AOTMetaspace::hp);
    PatchNativePointers patcher((Metadata**)r->mapped_base() + FileMapInfo::current_info()->mapped_heap()->ptrmap_start_pos());
    bm.iterate(&patcher);
  }
}

// The actual address of this region during dump time.
address AOTMappedHeapLoader::heap_region_dumptime_address(FileMapInfo* info) {
  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  assert(CDSConfig::is_using_archive(), "runtime only");
  assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
  if (UseCompressedOops) {
    return /*dumptime*/ (address)((uintptr_t)info->narrow_oop_base() + r->mapping_offset());
  } else {
    return heap_region_requested_address(info);
  }
}

// The address where this region can be mapped into the runtime heap without
// patching any of the pointers that are embedded in this region.
address AOTMappedHeapLoader::heap_region_requested_address(FileMapInfo* info) {
  assert(CDSConfig::is_using_archive(), "runtime only");
  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
  assert(can_use(), "cannot be used in AOTMappedHeapLoader::can_load() mode");
  if (UseCompressedOops) {
    // We can avoid relocation if each region's offset from the runtime CompressedOops::base()
    // is the same as its offset from the CompressedOops::base() during dumptime.
    // Note that CompressedOops::base() may be different between dumptime and runtime.
    //
    // Example:
    // Dumptime base = 0x1000 and shift is 0. We have a region at address 0x2000. There's a
    // narrowOop P stored in this region that points to an object at address 0x2200.
    // P's encoded value is 0x1200.
    //
    // Runtime base = 0x4000 and shift is also 0. If we map this region at 0x5000, then
    // the value P can remain 0x1200. The decoded address = (0x4000 + (0x1200 << 0)) = 0x5200,
    // which is the runtime location of the referenced object.
    return /*runtime*/ (address)((uintptr_t)CompressedOops::base() + r->mapping_offset());
  } else {
    // This was the hard-coded requested base address used at dump time. With uncompressed oops,
    // the heap range is assigned by the OS so we will most likely have to relocate anyway, no matter
    // what base address was picked at dump time.
    return (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
  }
}

bool AOTMappedHeapLoader::map_heap_region(FileMapInfo* info) {
  if (map_heap_region_impl(info)) {
#ifdef ASSERT
    // The "old" regions must be parsable -- we cannot have any unused space
    // at the start of the lowest G1 region that contains archived objects.
    assert(is_aligned(_mapped_heap_memregion.start(), G1HeapRegion::GrainBytes), "must be");

    // Make sure we map at the very top of the heap - see comments in
    // init_heap_region_relocation().
    MemRegion heap_range = G1CollectedHeap::heap()->reserved();
    assert(heap_range.contains(_mapped_heap_memregion), "must be");

    address heap_end = (address)heap_range.end();
    address mapped_heap_region_end = (address)_mapped_heap_memregion.end();
    assert(heap_end >= mapped_heap_region_end, "must be");
    assert(heap_end - mapped_heap_region_end < (intx)(G1HeapRegion::GrainBytes),
           "must be at the top of the heap to avoid fragmentation");
#endif

    set_mapped();
    return true;
  } else {
    return false;
  }
}

bool AOTMappedHeapLoader::map_heap_region_impl(FileMapInfo* info) {
  assert(UseG1GC, "the following code assumes G1");

  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  size_t size = r->used();
  if (size == 0) {
    return false; // no archived java heap data
  }

  size_t word_size = size / HeapWordSize;
  address requested_start = heap_region_requested_address(info);

  aot_log_info(aot)("Preferred address to map heap data (to avoid relocation) is " INTPTR_FORMAT, p2i(requested_start));

  // allocate from java heap
  HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size);
  if (start == nullptr) {
    AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to allocate java heap region for archive heap.");
    return false;
  }

  _mapped_heap_memregion = MemRegion(start, word_size);

  // Map the archived heap data. No need to call MemTracker::record_virtual_memory_tag()
  // for mapped region as it is part of the reserved java heap, which is already recorded.
  char* addr = (char*)_mapped_heap_memregion.start();
  char* base;

  if (AOTMetaspace::use_windows_memory_mapping() || UseLargePages) {
    // With UseLargePages, memory mapping may fail on some OSes if the size is not
    // large page aligned, so let's use read() instead. In this case, the memory region
    // is already committed by G1 so we don't need to commit it again.
    if (!info->read_region(AOTMetaspace::hp, addr,
                           align_up(_mapped_heap_memregion.byte_size(), os::vm_page_size()),
                           /* do_commit = */ !UseLargePages)) {
      dealloc_heap_region(info);
      aot_log_error(aot)("Failed to read archived heap region into " INTPTR_FORMAT, p2i(addr));
      return false;
    }
    // The checks for VerifySharedSpaces are already done inside read_region()
    base = addr;
  } else {
    base = info->map_heap_region(r, addr, _mapped_heap_memregion.byte_size());
    if (base == nullptr || base != addr) {
      dealloc_heap_region(info);
      AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to map at required address in java heap. "
                                            INTPTR_FORMAT ", size = %zu bytes",
                                            p2i(addr), _mapped_heap_memregion.byte_size());
      return false;
    }

    if (VerifySharedSpaces && !r->check_region_crc(base)) {
      dealloc_heap_region(info);
      AOTMetaspace::report_loading_error("UseSharedSpaces: mapped heap region is corrupt");
      return false;
    }
  }

  r->set_mapped_base(base);

  // If the requested range is different from the range allocated by GC, then
  // the pointers need to be patched.
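  //
  // Illustrative example (made-up addresses): requested_start = 0x6f0000000
  // but mapped_start = 0x6f2000000 gives delta = 0x2000000, so every embedded
  // pointer must be rebased by that amount.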
  address mapped_start = (address) _mapped_heap_memregion.start();
  ptrdiff_t delta = mapped_start - requested_start;
  if (UseCompressedOops &&
      (info->narrow_oop_mode() != CompressedOops::mode() ||
       info->narrow_oop_shift() != CompressedOops::shift())) {
    _heap_pointers_need_patching = true;
  }
  if (delta != 0) {
    _heap_pointers_need_patching = true;
  }
  init_mapped_heap_info(mapped_start, delta, info->narrow_oop_shift());

  if (_heap_pointers_need_patching) {
    char* bitmap_base = info->map_bitmap_region();
    if (bitmap_base == nullptr) {
      AOTMetaspace::report_loading_error("CDS heap cannot be used because bitmap region cannot be mapped");
      dealloc_heap_region(info);
      _heap_pointers_need_patching = false;
      return false;
    }
  }
  aot_log_info(aot)("Heap data mapped at " INTPTR_FORMAT ", size = %8zu bytes",
                p2i(mapped_start), _mapped_heap_memregion.byte_size());
  aot_log_info(aot)("CDS heap data relocation delta = %zd bytes", delta);
  return true;
}

narrowOop AOTMappedHeapLoader::encoded_heap_region_dumptime_address(FileMapInfo* info) {
  assert(CDSConfig::is_using_archive(), "runtime only");
  assert(UseCompressedOops, "sanity");
  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  return CompressedOops::narrow_oop_cast(r->mapping_offset() >> info->narrow_oop_shift());
}

void AOTMappedHeapLoader::patch_heap_embedded_pointers(FileMapInfo* info) {
  if (!info->is_mapped() || !_heap_pointers_need_patching) {
    return;
  }

  char* bitmap_base = info->map_bitmap_region();
  assert(bitmap_base != nullptr, "must have already been mapped");

  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  patch_embedded_pointers(
      info, _mapped_heap_memregion,
      (address)(info->region_at(AOTMetaspace::bm)->mapped_base()) + r->oopmap_offset(),
      r->oopmap_size_in_bits());
}

void AOTMappedHeapLoader::fixup_mapped_heap_region(FileMapInfo* info) {
  if (is_mapped()) {
    assert(!_mapped_heap_memregion.is_empty(), "sanity");

    // Populate the archive regions' G1BlockOffsetTables. That ensures
    // fast G1BlockOffsetTable::block_start operations for any given address
    // within the archive regions when trying to find start of an object
    // (e.g. during card table scanning).
    G1CollectedHeap::heap()->populate_archive_regions_bot(_mapped_heap_memregion);
  }
}

// dealloc the archive regions from java heap
void AOTMappedHeapLoader::dealloc_heap_region(FileMapInfo* info) {
  G1CollectedHeap::heap()->dealloc_archive_regions(_mapped_heap_memregion);
}

AOTMapLogger::OopDataIterator* AOTMappedHeapLoader::oop_iterator(FileMapInfo* info, address buffer_start, address buffer_end) {
  class MappedLoaderOopIterator : public AOTMappedHeapOopIterator {
  public:
    MappedLoaderOopIterator(address buffer_start,
                            address buffer_end,
                            address requested_base,
                            address requested_start,
                            int requested_shift,
                            size_t num_root_segments) :
      AOTMappedHeapOopIterator(buffer_start,
                               buffer_end,
                               requested_base,
                               requested_start,
                               requested_shift,
                               num_root_segments) {}

    AOTMapLogger::OopData capture(address buffered_addr) override {
      oopDesc* raw_oop = (oopDesc*)buffered_addr;
      size_t size = raw_oop->size();
      address requested_addr = buffered_addr + _buffer_to_requested_delta;
      intptr_t target_location = intptr_t(requested_addr);
      uint64_t pd = (uint64_t)(pointer_delta(buffered_addr, _buffer_start, 1));
      uint32_t narrow_location = checked_cast<uint32_t>(_buffer_start_narrow_oop + (pd >> _requested_shift));
      Klass* klass = raw_oop->klass();

      return { buffered_addr,
               requested_addr,
               target_location,
               narrow_location,
               raw_oop,
               klass,
               size,
               false };
    }
  };

  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  address requested_base = UseCompressedOops ? (address)info->narrow_oop_base() : heap_region_requested_address(info);
  address requested_start = requested_base + r->mapping_offset();
  int requested_shift = info->narrow_oop_shift();

  return new MappedLoaderOopIterator(buffer_start,
                                     buffer_end,
                                     requested_base,
                                     requested_start,
                                     requested_shift,
                                     info->mapped_heap()->root_segments().count());
}

#endif // INCLUDE_CDS_JAVA_HEAP