/*
 * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotLogging.hpp"
#include "cds/aotMappedHeapLoader.inline.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "logging/logTag.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "sanitizers/ub.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool AOTMappedHeapLoader::_is_mapped = false;
bool AOTMappedHeapLoader::_is_loaded = false;

bool    AOTMappedHeapLoader::_narrow_oop_base_initialized = false;
address AOTMappedHeapLoader::_narrow_oop_base;
int     AOTMappedHeapLoader::_narrow_oop_shift;

// Support for loaded heap.
uintptr_t AOTMappedHeapLoader::_loaded_heap_bottom = 0;
uintptr_t AOTMappedHeapLoader::_loaded_heap_top = 0;
uintptr_t AOTMappedHeapLoader::_dumptime_base = UINTPTR_MAX;
uintptr_t AOTMappedHeapLoader::_dumptime_top = 0;
intx AOTMappedHeapLoader::_runtime_offset = 0;
bool AOTMappedHeapLoader::_loading_failed = false;

// Support for mapped heap.
uintptr_t AOTMappedHeapLoader::_mapped_heap_bottom = 0;
bool      AOTMappedHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t AOTMappedHeapLoader::_mapped_heap_delta = 0;

// Heap roots
GrowableArrayCHeap<OopHandle, mtClassShared>* AOTMappedHeapLoader::_root_segments = nullptr;
int AOTMappedHeapLoader::_root_segment_max_size_elems;

MemRegion AOTMappedHeapLoader::_mapped_heap_memregion;
bool AOTMappedHeapLoader::_heap_pointers_need_patching;

// Every mapped region is offset by _mapped_heap_delta from its requested address.
// See FileMapInfo::heap_region_requested_address().
ATTRIBUTE_NO_UBSAN
void AOTMappedHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
  assert(!_mapped_heap_relocation_initialized, "only once");
  if (!UseCompressedOops) {
    assert(dumptime_oop_shift == 0, "sanity");
  }
  assert(can_map(), "sanity");
  init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
  _mapped_heap_bottom = (intptr_t)mapped_heap_bottom;
  _mapped_heap_delta = delta;
  _mapped_heap_relocation_initialized = true;
}

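// Record the narrow-oop encoding (base/shift) that was in effect when the archived
// heap objects were written, so that the dump-time narrowOops stored in the
// archive can be decoded at runtime.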
void AOTMappedHeapLoader::init_narrow_oop_decoding(address base, int shift) {
  assert(!_narrow_oop_base_initialized, "only once");
  _narrow_oop_base_initialized = true;
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

void AOTMappedHeapLoader::fixup_region() {
  FileMapInfo* mapinfo = FileMapInfo::current_info();
  if (is_mapped()) {
    fixup_mapped_heap_region(mapinfo);
  } else if (_loading_failed) {
    fill_failed_loaded_heap();
  }
}

// ------------------ Support for Region MAPPING -----------------------------------------

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchCompressedEmbeddedPointers: public BitMapClosure {
  narrowOop* _start;

 public:
  PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    oop o = AOTMappedHeapLoader::decode_from_mapped_archive(v);
    RawAccess<IS_NOT_NULL>::oop_store(p, o);
    return true;
  }
};

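// Fast-path variant of the above: when the dump-time and runtime narrow-oop
// shifts are the same, relocation reduces to adding a constant 32-bit delta to
// each narrowOop, with no decode/encode round trip. The ASSERT block
// cross-checks the result against the full decoding.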
class PatchCompressedEmbeddedPointersQuick: public BitMapClosure {
  narrowOop* _start;
  uint32_t _delta;

 public:
  PatchCompressedEmbeddedPointersQuick(narrowOop* start, uint32_t delta) : _start(start), _delta(delta) {}

  bool do_bit(size_t offset) {
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    narrowOop new_v = CompressedOops::narrow_oop_cast(CompressedOops::narrow_oop_value(v) + _delta);
    assert(!CompressedOops::is_null(new_v), "should never relocate to narrowOop(0)");
#ifdef ASSERT
    oop o1 = AOTMappedHeapLoader::decode_from_mapped_archive(v);
    oop o2 = CompressedOops::decode_not_null(new_v);
    assert(o1 == o2, "quick delta must work");
#endif
    RawAccess<IS_NOT_NULL>::oop_store(p, new_v);
    return true;
  }
};

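// Patches raw (uncompressed) oop fields by adding a byte delta: the mapped-heap
// delta when the region has been mapped, or the region's runtime offset when it
// has been loaded.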
class PatchUncompressedEmbeddedPointers: public BitMapClosure {
  oop* _start;
  intptr_t _delta;

 public:
  PatchUncompressedEmbeddedPointers(oop* start, intx runtime_offset) :
    _start(start),
    _delta(runtime_offset) {}

  PatchUncompressedEmbeddedPointers(oop* start) :
    _start(start),
    _delta(AOTMappedHeapLoader::mapped_heap_delta()) {}

  bool do_bit(size_t offset) {
    oop* p = _start + offset;
    intptr_t dumptime_oop = (intptr_t)((void*)*p);
    assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
    intptr_t runtime_oop = dumptime_oop + _delta;
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
    return true;
  }
};

void AOTMappedHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
                                                             FileMapInfo* info,
                                                             MemRegion region) {
  narrowOop dt_encoded_bottom = encoded_heap_region_dumptime_address(info);
  narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start()));
  log_info(aot)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x",
                  (uint)dt_encoded_bottom, (uint)rt_encoded_bottom);

  // Optimization: if dumptime shift is the same as runtime shift, we can perform a
  // quick conversion from "dumptime narrowOop" -> "runtime narrowOop".
  narrowOop* patching_start = (narrowOop*)region.start() + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos();
  if (_narrow_oop_shift == CompressedOops::shift()) {
    uint32_t quick_delta = (uint32_t)rt_encoded_bottom - (uint32_t)dt_encoded_bottom;
    log_info(aot)("heap data relocation quick delta = 0x%x", quick_delta);
    if (quick_delta == 0) {
      log_info(aot)("heap data relocation unnecessary, quick_delta = 0");
    } else {
      PatchCompressedEmbeddedPointersQuick patcher(patching_start, quick_delta);
      bm.iterate(&patcher);
    }
  } else {
    log_info(aot)("heap data quick relocation not possible");
    PatchCompressedEmbeddedPointers patcher(patching_start);
    bm.iterate(&patcher);
  }
}

// Patch all the non-null pointers that are embedded in the archived heap objects
// in this (mapped) region
void AOTMappedHeapLoader::patch_embedded_pointers(FileMapInfo* info,
                                                  MemRegion region, address oopmap,
                                                  size_t oopmap_size_in_bits) {
  BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
  if (UseCompressedOops) {
    patch_compressed_embedded_pointers(bm, info, region);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)region.start() + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos());
    bm.iterate(&patcher);
  }
}

// ------------------ Support for Region LOADING -----------------------------------------

// The CDS archive remembers each heap object by its address at dump time, but
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses.
struct LoadedArchiveHeapRegion {
  int       _region_index;   // index for FileMapInfo::space_at(index)
  size_t    _region_size;    // number of bytes in this region
  uintptr_t _dumptime_base;  // The dump-time (decoded) address of the first object in this region
  intx      _runtime_offset; // If an object's dump time address P is within this region, its
                             // runtime address is P + _runtime_offset
  uintptr_t top() {
    return _dumptime_base + _region_size;
  }
};

void AOTMappedHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) {
  _dumptime_base = loaded_region->_dumptime_base;
  _dumptime_top = loaded_region->top();
  _runtime_offset = loaded_region->_runtime_offset;
}

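// "Loading" copies the archived heap region into a buffer allocated from the
// runtime heap, so it works with any GC that supports
// allocate_loaded_archive_space(). "Mapping" (see below) requires G1.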
bool AOTMappedHeapLoader::can_load() {
  return Universe::heap()->can_load_archived_objects();
}

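// Relocates the compressed oop fields within a loaded region: decode the
// dump-time narrowOop, add the region's runtime offset to the resulting address,
// and store it back as a raw oop.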
class AOTMappedHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
  narrowOop* _start;
  intx _offset;
  uintptr_t _base;
  uintptr_t _top;

 public:
  PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region)
    : _start(start),
      _offset(loaded_region->_runtime_offset),
      _base(loaded_region->_dumptime_base),
      _top(loaded_region->top()) {}

  bool do_bit(size_t offset) {
    assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
    narrowOop* p = _start + offset;
    narrowOop v = *p;
    assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
    uintptr_t o = cast_from_oop<uintptr_t>(AOTMappedHeapLoader::decode_from_archive(v));
    assert(_base <= o && o < _top, "must be");

    o += _offset;
    AOTMappedHeapLoader::assert_in_loaded_heap(o);
    RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
    return true;
  }
};

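// Allocate a buffer from the runtime heap that is large enough to hold the
// archived heap region, and compute the offset from the region's dump-time base
// address to the allocated buffer.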
bool AOTMappedHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                             MemRegion& archive_space) {
  size_t total_bytes = 0;
  FileMapRegion* r = mapinfo->region_at(AOTMetaspace::hp);
  r->assert_is_heap_region();
  if (r->used() == 0) {
    return false;
  }

  assert(is_aligned(r->used(), HeapWordSize), "must be");
  total_bytes += r->used();
  loaded_region->_region_index = AOTMetaspace::hp;
  loaded_region->_region_size = r->used();
  loaded_region->_dumptime_base = (uintptr_t)heap_region_dumptime_address(mapinfo);

  assert(is_aligned(total_bytes, HeapWordSize), "must be");
  size_t word_size = total_bytes / HeapWordSize;
  HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
  if (buffer == nullptr) {
    return false;
  }

  archive_space = MemRegion(buffer, word_size);
  _loaded_heap_bottom = (uintptr_t)archive_space.start();
  _loaded_heap_top    = _loaded_heap_bottom + total_bytes;

  loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base;

  return true;
}

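// Read the archived heap region into the buffer at load_address, then patch all
// non-null oop fields using the region's oopmap. On failure, the buffer (which
// has already been allocated from the heap) is filled later by
// fill_failed_loaded_heap() so the heap stays parsable.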
bool AOTMappedHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                                uintptr_t load_address) {
  uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
  if (bitmap_base == 0) {
    _loading_failed = true;
    return false; // OOM or CRC error
  }

  FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index);
  if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
    // There's no easy way to free the buffer, so we will fill it with zero later
    // in fill_failed_loaded_heap(), and it will eventually be GC'ed.
    log_warning(aot)("Loading of heap region %d has failed. Archived objects are disabled", loaded_region->_region_index);
    _loading_failed = true;
    return false;
  }
  assert(r->mapped_base() == (char*)load_address, "sanity");
  log_info(aot)("Loaded heap    region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
                " size %6zu delta %zd",
                loaded_region->_region_index, load_address, load_address + loaded_region->_region_size,
                loaded_region->_region_size, loaded_region->_runtime_offset);

  uintptr_t oopmap = bitmap_base + r->oopmap_offset();
  BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());

  if (UseCompressedOops) {
    PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos(), loaded_region);
    bm.iterate(&patcher);
  } else {
    PatchUncompressedEmbeddedPointers patcher((oop*)load_address + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos(), loaded_region->_runtime_offset);
    bm.iterate(&patcher);
  }
  return true;
}

bool AOTMappedHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
  assert(can_load(), "loading the archived heap must be supported");
  init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());

  LoadedArchiveHeapRegion loaded_region;
  memset(&loaded_region, 0, sizeof(loaded_region));

  MemRegion archive_space;
  if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) {
    return false;
  }

  if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) {
    assert(_loading_failed, "must be");
    return false;
  }

  init_loaded_heap_relocation(&loaded_region);
  _is_loaded = true;

  return true;
}

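// The archived heap roots are stored in one or more consecutive objArray
// segments (see finish_initialization()). root_segment(i) resolves the
// OopHandle of the i-th segment.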
objArrayOop AOTMappedHeapLoader::root_segment(int segment_idx) {
  if (CDSConfig::is_dumping_heap() && !CDSConfig::is_dumping_final_static_archive()) {
    assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
  } else {
    assert(CDSConfig::is_using_archive(), "must be");
  }

  objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
  assert(segment != nullptr, "should have been initialized");
  return segment;
}

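// Convert a global root index into a (segment index, index within segment) pair.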
void AOTMappedHeapLoader::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
  assert(_root_segment_max_size_elems > 0, "sanity");

  // Try to avoid divisions for the common case.
  if (idx < _root_segment_max_size_elems) {
    seg_idx = 0;
    int_idx = idx;
  } else {
    seg_idx = idx / _root_segment_max_size_elems;
    int_idx = idx % _root_segment_max_size_elems;
  }

  assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
         "sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}

void AOTMappedHeapLoader::add_root_segment(objArrayOop segment_oop) {
  assert(segment_oop != nullptr, "must be");
  assert(is_in_use(), "must be");
  if (_root_segments == nullptr) {
    _root_segments = new GrowableArrayCHeap<OopHandle, mtClassShared>(10);
  }
  _root_segments->push(OopHandle(Universe::vm_global(), segment_oop));
}

void AOTMappedHeapLoader::init_root_segment_sizes(int max_size_elems) {
  _root_segment_max_size_elems = max_size_elems;
}

oop AOTMappedHeapLoader::get_root(int index) {
  assert(!_root_segments->is_empty(), "must have loaded shared heap");
  int seg_idx, int_idx;
  get_segment_indexes(index, seg_idx, int_idx);
  objArrayOop result = objArrayOop(root_segment(seg_idx));
  return result->obj_at(int_idx);
}

void AOTMappedHeapLoader::clear_root(int index) {
  int seg_idx, int_idx;
  get_segment_indexes(index, seg_idx, int_idx);
  root_segment(seg_idx)->obj_at_put(int_idx, nullptr);
}

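// Used by verify_loaded_heap(): every non-null oop field in the loaded region
// must point to the beginning of an object within that same region.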
class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
  HashTable<uintptr_t, bool>* _table;

 public:
  VerifyLoadedHeapEmbeddedPointers(HashTable<uintptr_t, bool>* table) : _table(table) {}

  virtual void do_oop(narrowOop* p) {
    // This should be called before the loaded region is modified, so all the embedded pointers
    // must be null, or must point to a valid object in the loaded region.
    narrowOop v = *p;
    if (!CompressedOops::is_null(v)) {
      oop o = CompressedOops::decode_not_null(v);
      uintptr_t u = cast_from_oop<uintptr_t>(o);
      AOTMappedHeapLoader::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
    }
  }
  virtual void do_oop(oop* p) {
    oop v = *p;
    if (v != nullptr) {
      uintptr_t u = cast_from_oop<uintptr_t>(v);
      AOTMappedHeapLoader::assert_in_loaded_heap(u);
      guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
    }
  }
};

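// Called after the heap region has been mapped or loaded: patch the embedded
// oop and native pointers, then locate the root segments and the shared
// strings array.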
void AOTMappedHeapLoader::finish_initialization(FileMapInfo* info) {
  patch_heap_embedded_pointers(info);

  if (is_loaded()) {
    // These operations are needed only when the heap is loaded (not mapped).
    finish_loaded_heap();
    if (VerifyArchivedFields > 0) {
      verify_loaded_heap();
    }
  }
  if (is_in_use()) {
    patch_native_pointers();
    intptr_t bottom = is_loaded() ? _loaded_heap_bottom : _mapped_heap_bottom;

    // The heap roots are stored in one or more segments that are laid out consecutively.
    // The size of each segment (except for the last one) is max_size_in_{elems,bytes}.
    HeapRootSegments segments = FileMapInfo::current_info()->mapped_heap()->root_segments();
    init_root_segment_sizes(segments.max_size_in_elems());
    intptr_t first_segment_addr = bottom + segments.base_offset();
    for (size_t c = 0; c < segments.count(); c++) {
      oop segment_oop = cast_to_oop(first_segment_addr + (c * segments.max_size_in_bytes()));
      assert(segment_oop->is_objArray(), "Must be");
      add_root_segment((objArrayOop)segment_oop);
    }

    StringTable::load_shared_strings_array();
  }
}

void AOTMappedHeapLoader::finish_loaded_heap() {
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top    = (HeapWord*)_loaded_heap_top;

  MemRegion archive_space = MemRegion(bottom, top);
  Universe::heap()->complete_loaded_archive_space(archive_space);
}

void AOTMappedHeapLoader::verify_loaded_heap() {
  log_info(aot, heap)("Verify all oops and pointers in loaded heap");

  ResourceMark rm;
  HashTable<uintptr_t, bool> table;
  VerifyLoadedHeapEmbeddedPointers verifier(&table);
  HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
  HeapWord* top    = (HeapWord*)_loaded_heap_top;

  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    table.put(cast_from_oop<uintptr_t>(o), true);
    p += o->size();
  }

  for (HeapWord* p = bottom; p < top; ) {
    oop o = cast_to_oop(p);
    o->oop_iterate(&verifier);
    p += o->size();
  }
}

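// If reading the region failed, the buffer that was already allocated from the
// heap cannot easily be freed. Overwrite it with filler objects so the heap
// stays parsable; the space will eventually be reclaimed by GC.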
void AOTMappedHeapLoader::fill_failed_loaded_heap() {
  assert(_loading_failed, "must be");
  if (_loaded_heap_bottom != 0) {
    assert(_loaded_heap_top != 0, "must be");
    HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
    HeapWord* top = (HeapWord*)_loaded_heap_top;
    Universe::heap()->fill_with_objects(bottom, top - bottom);
  }
}

oop AOTMappedHeapLoader::oop_from_offset(int offset) {
  // Once GC starts, the offsets saved in CachedCodeDirectoryInternal::_permanent_oop_offsets
  // will become invalid. I don't know what function can check if GCs are allowed, but surely
  // GCs can't happen before the Object class is loaded.
  assert(CDSConfig::is_using_archive(), "sanity");
  assert(vmClasses::Object_klass()->class_loader_data() == nullptr,
         "can be called only very early during VM start-up");
  if (is_loaded()) {
    return cast_to_oop(_loaded_heap_bottom + offset);
  } else {
    assert(is_mapped(), "must be");
    return cast_to_oop(_mapped_heap_bottom + offset);
  }
}

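// Patch the Metadata* fields embedded in archived heap objects by the metaspace
// relocation delta. The region's ptrmap records the location of every such field.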
class PatchNativePointers: public BitMapClosure {
  Metadata** _start;

 public:
  PatchNativePointers(Metadata** start) : _start(start) {}

  bool do_bit(size_t offset) {
    Metadata** p = _start + offset;
    *p = (Metadata*)(address(*p) + AOTMetaspace::relocation_delta());
    return true;
  }
};

void AOTMappedHeapLoader::patch_native_pointers() {
  if (AOTMetaspace::relocation_delta() == 0) {
    return;
  }

  FileMapRegion* r = FileMapInfo::current_info()->region_at(AOTMetaspace::hp);
  if (r->mapped_base() != nullptr && r->has_ptrmap()) {
    log_info(aot, heap)("Patching native pointers in heap region");
    BitMapView bm = FileMapInfo::current_info()->ptrmap_view(AOTMetaspace::hp);
    PatchNativePointers patcher((Metadata**)r->mapped_base() + FileMapInfo::current_info()->mapped_heap()->ptrmap_start_pos());
    bm.iterate(&patcher);
  }
}

// The actual address of this region during dump time.
address AOTMappedHeapLoader::heap_region_dumptime_address(FileMapInfo* info) {
  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  assert(CDSConfig::is_using_archive(), "runtime only");
  assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
  if (UseCompressedOops) {
    return /*dumptime*/ (address)((uintptr_t)info->narrow_oop_base() + r->mapping_offset());
  } else {
    return heap_region_requested_address(info);
  }
}

// The address where this region can be mapped into the runtime heap without
// patching any of the pointers that are embedded in this region.
address AOTMappedHeapLoader::heap_region_requested_address(FileMapInfo* info) {
  assert(CDSConfig::is_using_archive(), "runtime only");
  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
  assert(can_use(), "cannot be used by AOTMappedHeapLoader::can_load() mode");
  if (UseCompressedOops) {
    // We can avoid relocation if each region's offset from the runtime CompressedOops::base()
    // is the same as its offset from the CompressedOops::base() during dumptime.
    // Note that CompressedOops::base() may be different between dumptime and runtime.
    //
    // Example:
    // Dumptime base = 0x1000 and shift is 0. We have a region at address 0x2000. There's a
    // narrowOop P stored in this region that points to an object at address 0x2200.
    // P's encoded value is 0x1200.
    //
    // Runtime base = 0x4000 and shift is also 0. If we map this region at 0x5000, then
    // the value P can remain 0x1200. The decoded address = (0x4000 + (0x1200 << 0)) = 0x5200,
    // which is the runtime location of the referenced object.
    return /*runtime*/ (address)((uintptr_t)CompressedOops::base() + r->mapping_offset());
  } else {
    // This was the hard-coded requested base address used at dump time. With uncompressed oops,
    // the heap range is assigned by the OS so we will most likely have to relocate anyway, no matter
    // what base address was picked at dump time.
    return (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
  }
}

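// Map the archived heap region into the runtime (G1) heap. On success, the
// region must lie entirely within the heap and end at the very top of the heap
// (asserted below).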
bool AOTMappedHeapLoader::map_heap_region(FileMapInfo* info) {
  if (map_heap_region_impl(info)) {
#ifdef ASSERT
    // The "old" regions must be parsable -- we cannot have any unused space
    // at the start of the lowest G1 region that contains archived objects.
    assert(is_aligned(_mapped_heap_memregion.start(), G1HeapRegion::GrainBytes), "must be");

    // Make sure we map at the very top of the heap - see comments in
    // init_heap_region_relocation().
    MemRegion heap_range = G1CollectedHeap::heap()->reserved();
    assert(heap_range.contains(_mapped_heap_memregion), "must be");

    address heap_end = (address)heap_range.end();
    address mapped_heap_region_end = (address)_mapped_heap_memregion.end();
    assert(heap_end >= mapped_heap_region_end, "must be");
    assert(heap_end - mapped_heap_region_end < (intx)(G1HeapRegion::GrainBytes),
           "must be at the top of the heap to avoid fragmentation");
#endif

    set_mapped();
    return true;
  } else {
    return false;
  }
}

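// Allocate a range inside the G1 heap, preferably at the requested address so
// that no pointer patching is needed, then map (or, on Windows or with large
// pages, read) the archived heap data into it.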
bool AOTMappedHeapLoader::map_heap_region_impl(FileMapInfo* info) {
  assert(UseG1GC, "the following code assumes G1");

  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  size_t size = r->used();
  if (size == 0) {
    return false; // no archived java heap data
  }

  size_t word_size = size / HeapWordSize;
  address requested_start = heap_region_requested_address(info);

  aot_log_info(aot)("Preferred address to map heap data (to avoid relocation) is " INTPTR_FORMAT, p2i(requested_start));

  // allocate from java heap
  HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size, (HeapWord*)requested_start);
  if (start == nullptr) {
    AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to allocate java heap region for archive heap.");
    return false;
  }

  _mapped_heap_memregion = MemRegion(start, word_size);

  // Map the archived heap data. No need to call MemTracker::record_virtual_memory_tag()
  // for mapped region as it is part of the reserved java heap, which is already recorded.
  char* addr = (char*)_mapped_heap_memregion.start();
  char* base;

  if (AOTMetaspace::use_windows_memory_mapping() || UseLargePages) {
    // With UseLargePages, memory mapping may fail on some OSes if the size is not
    // large page aligned, so let's use read() instead. In this case, the memory region
    // is already committed by G1 so we don't need to commit it again.
    if (!info->read_region(AOTMetaspace::hp, addr,
                           align_up(_mapped_heap_memregion.byte_size(), os::vm_page_size()),
                           /* do_commit = */ !UseLargePages)) {
      dealloc_heap_region(info);
      aot_log_error(aot)("Failed to read archived heap region into " INTPTR_FORMAT, p2i(addr));
      return false;
    }
    // The check for VerifySharedSpaces is already done inside read_region()
    base = addr;
  } else {
    base = info->map_heap_region(r, addr, _mapped_heap_memregion.byte_size());
    if (base == nullptr || base != addr) {
      dealloc_heap_region(info);
      AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to map at required address in java heap. "
                                            INTPTR_FORMAT ", size = %zu bytes",
                                            p2i(addr), _mapped_heap_memregion.byte_size());
      return false;
    }

    if (VerifySharedSpaces && !r->check_region_crc(base)) {
      dealloc_heap_region(info);
      AOTMetaspace::report_loading_error("UseSharedSpaces: mapped heap region is corrupt");
      return false;
    }
  }

  r->set_mapped_base(base);

  // If the requested range is different from the range allocated by GC, then
  // the pointers need to be patched.
  address mapped_start = (address) _mapped_heap_memregion.start();
  ptrdiff_t delta = mapped_start - requested_start;
  if (UseCompressedOops &&
      (info->narrow_oop_mode() != CompressedOops::mode() ||
       info->narrow_oop_shift() != CompressedOops::shift())) {
    _heap_pointers_need_patching = true;
  }
  if (delta != 0) {
    _heap_pointers_need_patching = true;
  }
  init_mapped_heap_info(mapped_start, delta, info->narrow_oop_shift());

  if (_heap_pointers_need_patching) {
    char* bitmap_base = info->map_bitmap_region();
    if (bitmap_base == nullptr) {
      AOTMetaspace::report_loading_error("CDS heap cannot be used because bitmap region cannot be mapped");
      dealloc_heap_region(info);
      _heap_pointers_need_patching = false;
      return false;
    }
  }
  aot_log_info(aot)("Heap data mapped at " INTPTR_FORMAT ", size = %8zu bytes",
                p2i(mapped_start), _mapped_heap_memregion.byte_size());
  aot_log_info(aot)("CDS heap data relocation delta = %zd bytes", delta);
  return true;
}

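// The narrowOop encoding of the first object in this region at dump time, i.e.,
// mapping_offset() right-shifted by the dump-time narrow-oop shift.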
narrowOop AOTMappedHeapLoader::encoded_heap_region_dumptime_address(FileMapInfo* info) {
  assert(CDSConfig::is_using_archive(), "runtime only");
  assert(UseCompressedOops, "sanity");
  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  return CompressedOops::narrow_oop_cast(r->mapping_offset() >> info->narrow_oop_shift());
}

void AOTMappedHeapLoader::patch_heap_embedded_pointers(FileMapInfo* info) {
  if (!info->is_mapped() || !_heap_pointers_need_patching) {
    return;
  }

  char* bitmap_base = info->map_bitmap_region();
  assert(bitmap_base != nullptr, "must have already been mapped");

  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  patch_embedded_pointers(
      info, _mapped_heap_memregion,
      (address)(info->region_at(AOTMetaspace::bm)->mapped_base()) + r->oopmap_offset(),
      r->oopmap_size_in_bits());
}

void AOTMappedHeapLoader::fixup_mapped_heap_region(FileMapInfo* info) {
  if (is_mapped()) {
    assert(!_mapped_heap_memregion.is_empty(), "sanity");

    // Populate the archive regions' G1BlockOffsetTables. That ensures
    // fast G1BlockOffsetTable::block_start operations for any given address
    // within the archive regions when trying to find start of an object
    // (e.g. during card table scanning).
    G1CollectedHeap::heap()->populate_archive_regions_bot(_mapped_heap_memregion);
  }
}

// dealloc the archive regions from java heap
void AOTMappedHeapLoader::dealloc_heap_region(FileMapInfo* info) {
  G1CollectedHeap::heap()->dealloc_archive_regions(_mapped_heap_memregion);
}

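// Build an iterator over the objects in the mapped heap region for AOTMapLogger.
// Each object is reported with both its address in the mapped buffer and the
// "requested" address/narrowOop that was computed at dump time.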
AOTMapLogger::OopDataIterator* AOTMappedHeapLoader::oop_iterator(FileMapInfo* info, address buffer_start, address buffer_end) {
  class MappedLoaderOopIterator : public AOTMapLogger::OopDataIterator {
  private:
    address _current;
    address _next;

    address _buffer_start;
    address _buffer_end;
    uint64_t _buffer_start_narrow_oop;
    intptr_t _buffer_to_requested_delta;
    int _requested_shift;

    size_t _num_root_segments;
    size_t _num_obj_arrays_logged;

  public:
    MappedLoaderOopIterator(address buffer_start,
                            address buffer_end,
                            uint64_t buffer_start_narrow_oop,
                            intptr_t buffer_to_requested_delta,
                            int requested_shift,
                            size_t num_root_segments)
      : _current(nullptr),
        _next(buffer_start),
        _buffer_start(buffer_start),
        _buffer_end(buffer_end),
        _buffer_start_narrow_oop(buffer_start_narrow_oop),
        _buffer_to_requested_delta(buffer_to_requested_delta),
        _requested_shift(requested_shift),
        _num_root_segments(num_root_segments),
        _num_obj_arrays_logged(0) {
    }

    AOTMapLogger::OopData capture(address buffered_addr) {
      oopDesc* raw_oop = (oopDesc*)buffered_addr;
      size_t size = raw_oop->size();
      address requested_addr = buffered_addr + _buffer_to_requested_delta;
      intptr_t target_location = intptr_t(requested_addr);
      uint64_t pd = (uint64_t)(pointer_delta(buffered_addr, _buffer_start, 1));
      uint32_t narrow_location = checked_cast<uint32_t>(_buffer_start_narrow_oop + (pd >> _requested_shift));
      Klass* klass = raw_oop->klass();

      return { buffered_addr,
               requested_addr,
               target_location,
               narrow_location,
               raw_oop,
               klass,
               size,
               false };
    }

    bool has_next() override {
      return _next < _buffer_end;
    }

    AOTMapLogger::OopData next() override {
      _current = _next;
      AOTMapLogger::OopData result = capture(_current);
      if (result._klass->is_objArray_klass()) {
        result._is_root_segment = _num_obj_arrays_logged++ < _num_root_segments;
      }
      _next = _current + result._size * BytesPerWord;
      return result;
    }

    AOTMapLogger::OopData obj_at(narrowOop* addr) override {
      uint64_t n = (uint64_t)(*addr);
      if (n == 0) {
        return null_data();
      } else {
        precond(n >= _buffer_start_narrow_oop);
        address buffer_addr = _buffer_start + ((n - _buffer_start_narrow_oop) << _requested_shift);
        return capture(buffer_addr);
      }
    }

    AOTMapLogger::OopData obj_at(oop* addr) override {
      address requested_value = cast_from_oop<address>(*addr);
      if (requested_value == nullptr) {
        return null_data();
      } else {
        address buffer_addr = requested_value - _buffer_to_requested_delta;
        return capture(buffer_addr);
      }
    }

    GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
      return new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();
    }
  };

  FileMapRegion* r = info->region_at(AOTMetaspace::hp);
  address requested_base = UseCompressedOops ? (address)info->narrow_oop_base() : heap_region_requested_address(info);
  address requested_start = requested_base + r->mapping_offset();
  int requested_shift = info->narrow_oop_shift();
  intptr_t buffer_to_requested_delta = requested_start - buffer_start;
  uint64_t buffer_start_narrow_oop = 0xdeadbeed;
  if (UseCompressedOops) {
    buffer_start_narrow_oop = (uint64_t)(pointer_delta(requested_start, requested_base, 1)) >> requested_shift;
    assert(buffer_start_narrow_oop < 0xffffffff, "sanity");
  }

  return new MappedLoaderOopIterator(buffer_start,
                                     buffer_end,
                                     buffer_start_narrow_oop,
                                     buffer_to_requested_delta,
                                     requested_shift,
                                     info->mapped_heap()->root_segments().count());
}

#endif // INCLUDE_CDS_JAVA_HEAP