1 /*
  2  * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "cds/archiveHeapLoader.inline.hpp"
 27 #include "cds/cdsConfig.hpp"
 28 #include "cds/heapShared.hpp"
 29 #include "cds/metaspaceShared.hpp"
 30 #include "classfile/classLoaderDataShared.hpp"
 31 #include "classfile/systemDictionaryShared.hpp"
 32 #include "classfile/vmClasses.hpp"
 33 #include "gc/shared/collectedHeap.hpp"
 34 #include "logging/log.hpp"
 35 #include "memory/iterator.inline.hpp"
 36 #include "memory/resourceArea.hpp"
 37 #include "memory/universe.hpp"
 38 #include "sanitizers/ub.hpp"
 39 #include "utilities/bitMap.inline.hpp"
 40 #include "utilities/copy.hpp"
 41 
 42 #if INCLUDE_CDS_JAVA_HEAP
 43 
bool ArchiveHeapLoader::_is_mapped = false;  // heap region is mapped directly from the archive file
bool ArchiveHeapLoader::_is_loaded = false;  // heap region was copied into a GC-allocated buffer

// Dump-time narrowOop encoding parameters; set exactly once by init_narrow_oop_decoding().
bool    ArchiveHeapLoader::_narrow_oop_base_initialized = false;
address ArchiveHeapLoader::_narrow_oop_base;
int     ArchiveHeapLoader::_narrow_oop_shift;

// Support for loaded heap.
uintptr_t ArchiveHeapLoader::_loaded_heap_bottom = 0;
uintptr_t ArchiveHeapLoader::_loaded_heap_top = 0;
uintptr_t ArchiveHeapLoader::_dumptime_base = UINTPTR_MAX; // dump-time address of the first object in the loaded region
uintptr_t ArchiveHeapLoader::_dumptime_top = 0;            // dump-time end (exclusive) of the loaded region
intx ArchiveHeapLoader::_runtime_offset = 0;               // runtime address = dump-time address + _runtime_offset
bool ArchiveHeapLoader::_loading_failed = false;           // set when reading the region or its bitmap fails

// Support for mapped heap.
uintptr_t ArchiveHeapLoader::_mapped_heap_bottom = 0;
bool      ArchiveHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t ArchiveHeapLoader::_mapped_heap_delta = 0;       // runtime mapped address - requested address
 63 
// Every mapped region is offset by _mapped_heap_delta from its requested address.
// See FileMapInfo::heap_region_requested_address().
//
// Records the mapped region's actual bottom address and its delta from the
// requested address, and initializes dump-time narrowOop decoding.
// Must be called exactly once, and only when mapping is possible.
ATTRIBUTE_NO_UBSAN
void ArchiveHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
  assert(!_mapped_heap_relocation_initialized, "only once");
  if (!UseCompressedOops) {
    // With uncompressed oops no narrowOop decoding is performed, so the
    // archive's recorded shift must be zero.
    assert(dumptime_oop_shift == 0, "sanity");
  }
  assert(can_map(), "sanity");
  // Dump-time narrowOops decode against the runtime base shifted by the
  // same delta the whole region was shifted by.
  init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
  _mapped_heap_bottom = (intptr_t)mapped_heap_bottom;
  _mapped_heap_delta = delta;
  _mapped_heap_relocation_initialized = true;
}
 78 
 79 void ArchiveHeapLoader::init_narrow_oop_decoding(address base, int shift) {
 80   assert(!_narrow_oop_base_initialized, "only once");
 81   _narrow_oop_base_initialized = true;
 82   _narrow_oop_base = base;
 83   _narrow_oop_shift = shift;
 84 }
 85 
// Fix up the archived heap region after an attempt to map or load it:
// finish the mapping, or fill a failed load buffer with filler objects.
void ArchiveHeapLoader::fixup_region() {
  FileMapInfo* mapinfo = FileMapInfo::current_info();
  if (is_mapped()) {
    mapinfo->fixup_mapped_heap_region();
  } else if (_loading_failed) {
    // The load buffer was allocated but could not be filled correctly;
    // overwrite it with filler objects (see fill_failed_loaded_heap()).
    fill_failed_loaded_heap();
  }
  if (is_in_use()) {
    if (!CDSConfig::is_using_full_module_graph()) {
      // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
      ClassLoaderDataShared::clear_archived_oops();
    }
  }
}
100 
101 // ------------------ Support for Region MAPPING -----------------------------------------
102 
103 // Patch all the embedded oop pointers inside an archived heap region,
104 // to be consistent with the runtime oop encoding.
105 class PatchCompressedEmbeddedPointers: public BitMapClosure {
106   narrowOop* _start;
107 
108  public:
109   PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}
110 
111   bool do_bit(size_t offset) {
112     narrowOop* p = _start + offset;
113     narrowOop v = *p;
114     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
115     oop o = ArchiveHeapLoader::decode_from_mapped_archive(v);
116     RawAccess<IS_NOT_NULL>::oop_store(p, o);
117     return true;
118   }
119 };
120 
121 class PatchCompressedEmbeddedPointersQuick: public BitMapClosure {
122   narrowOop* _start;
123   uint32_t _delta;
124 
125  public:
126   PatchCompressedEmbeddedPointersQuick(narrowOop* start, uint32_t delta) : _start(start), _delta(delta) {}
127 
128   bool do_bit(size_t offset) {
129     narrowOop* p = _start + offset;
130     narrowOop v = *p;
131     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
132     narrowOop new_v = CompressedOops::narrow_oop_cast(CompressedOops::narrow_oop_value(v) + _delta);
133     assert(!CompressedOops::is_null(new_v), "should never relocate to narrowOop(0)");
134 #ifdef ASSERT
135     oop o1 = ArchiveHeapLoader::decode_from_mapped_archive(v);
136     oop o2 = CompressedOops::decode_not_null(new_v);
137     assert(o1 == o2, "quick delta must work");
138 #endif
139     RawAccess<IS_NOT_NULL>::oop_store(p, new_v);
140     return true;
141   }
142 };
143 
144 class PatchUncompressedEmbeddedPointers: public BitMapClosure {
145   oop* _start;
146 
147  public:
148   PatchUncompressedEmbeddedPointers(oop* start) : _start(start) {}
149 
150   bool do_bit(size_t offset) {
151     oop* p = _start + offset;
152     intptr_t dumptime_oop = (intptr_t)((void*)*p);
153     assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
154     intptr_t runtime_oop = dumptime_oop + ArchiveHeapLoader::mapped_heap_delta();
155     RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
156     return true;
157   }
158 };
159 
160 void ArchiveHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
161                                                   FileMapInfo* info,
162                                                   MemRegion region) {
163   narrowOop dt_encoded_bottom = info->encoded_heap_region_dumptime_address();
164   narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start()));
165   log_info(cds)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x",
166                   (uint)dt_encoded_bottom, (uint)rt_encoded_bottom);
167 
168   // Optimization: if dumptime shift is the same as runtime shift, we can perform a
169   // quick conversion from "dumptime narrowOop" -> "runtime narrowOop".
170   narrowOop* patching_start = (narrowOop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos();
171   if (_narrow_oop_shift == CompressedOops::shift()) {
172     uint32_t quick_delta = (uint32_t)rt_encoded_bottom - (uint32_t)dt_encoded_bottom;
173     log_info(cds)("CDS heap data relocation quick delta = 0x%x", quick_delta);
174     if (quick_delta == 0) {
175       log_info(cds)("CDS heap data relocation unnecessary, quick_delta = 0");
176     } else {
177       PatchCompressedEmbeddedPointersQuick patcher(patching_start, quick_delta);
178       bm.iterate(&patcher);
179     }
180   } else {
181     log_info(cds)("CDS heap data quick relocation not possible");
182     PatchCompressedEmbeddedPointers patcher(patching_start);
183     bm.iterate(&patcher);
184   }
185 }
186 
187 // Patch all the non-null pointers that are embedded in the archived heap objects
188 // in this (mapped) region
189 void ArchiveHeapLoader::patch_embedded_pointers(FileMapInfo* info,
190                                                 MemRegion region, address oopmap,
191                                                 size_t oopmap_size_in_bits) {
192   BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
193   if (UseCompressedOops) {
194     patch_compressed_embedded_pointers(bm, info, region);
195   } else {
196     PatchUncompressedEmbeddedPointers patcher((oop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos());
197     bm.iterate(&patcher);
198   }
199 }
200 
201 // ------------------ Support for Region LOADING -----------------------------------------
202 
// The CDS archive remembers each heap object by its address at dump time, but
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses.
struct LoadedArchiveHeapRegion {
  int       _region_index;   // index for FileMapInfo::space_at(index)
  size_t    _region_size;    // number of bytes in this region
  uintptr_t _dumptime_base;  // The dump-time (decoded) address of the first object in this region
  intx      _runtime_offset; // If an object's dump time address P is within in this region, its
                             // runtime address is P + _runtime_offset
  // One-past-the-end dump-time address of this region.
  uintptr_t top() {
    return _dumptime_base + _region_size;
  }
};
217 
218 void ArchiveHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) {
219   _dumptime_base = loaded_region->_dumptime_base;
220   _dumptime_top = loaded_region->top();
221   _runtime_offset = loaded_region->_runtime_offset;
222 }
223 
224 bool ArchiveHeapLoader::can_load() {
225   if (!UseCompressedOops) {
226     // Pointer relocation for uncompressed oops is unimplemented.
227     return false;
228   }
229   return Universe::heap()->can_load_archived_objects();
230 }
231 
232 class ArchiveHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
233   narrowOop* _start;
234   intx _offset;
235   uintptr_t _base;
236   uintptr_t _top;
237 
238  public:
239   PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region)
240     : _start(start),
241       _offset(loaded_region->_runtime_offset),
242       _base(loaded_region->_dumptime_base),
243       _top(loaded_region->top()) {}
244 
245   bool do_bit(size_t offset) {
246     assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
247     narrowOop* p = _start + offset;
248     narrowOop v = *p;
249     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
250     uintptr_t o = cast_from_oop<uintptr_t>(ArchiveHeapLoader::decode_from_archive(v));
251     assert(_base <= o && o < _top, "must be");
252 
253     o += _offset;
254     ArchiveHeapLoader::assert_in_loaded_heap(o);
255     RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
256     return true;
257   }
258 };
259 
// Prepare to load the archived heap region:
//  - read the region's size and dump-time base address from the archive,
//  - allocate a buffer of that size from the GC (returned via archive_space),
//  - compute the dump-time -> runtime relocation offset.
// Returns false if the archive contains no heap objects or allocation fails.
bool ArchiveHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                           MemRegion& archive_space) {
  size_t total_bytes = 0;
  FileMapRegion* r = mapinfo->region_at(MetaspaceShared::hp);
  r->assert_is_heap_region();
  if (r->used() == 0) {
    return false; // no archived heap objects
  }

  assert(is_aligned(r->used(), HeapWordSize), "must be");
  total_bytes += r->used();
  loaded_region->_region_index = MetaspaceShared::hp;
  loaded_region->_region_size = r->used();
  loaded_region->_dumptime_base = (uintptr_t)mapinfo->heap_region_dumptime_address();

  assert(is_aligned(total_bytes, HeapWordSize), "must be");
  size_t word_size = total_bytes / HeapWordSize;
  // Ask the GC for a buffer inside the runtime heap to copy the region into.
  HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
  if (buffer == nullptr) {
    return false;
  }

  archive_space = MemRegion(buffer, word_size);
  _loaded_heap_bottom = (uintptr_t)archive_space.start();
  _loaded_heap_top    = _loaded_heap_bottom + total_bytes;

  // Objects move by the distance between the buffer and their dump-time base.
  loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base;

  return true;
}
290 
// Read the heap region's bytes from the archive into the buffer at
// load_address, then relocate all embedded narrowOops using the region's
// oopmap bitmap. Sets _loading_failed and returns false on any error.
bool ArchiveHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                              uintptr_t load_address) {
  uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
  if (bitmap_base == 0) {
    _loading_failed = true;
    return false; // OOM or CRC error
  }

  FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index);
  if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
    // There's no easy way to free the buffer, so we will fill it with zero later
    // in fill_failed_loaded_heap(), and it will eventually be GC'ed.
    log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", loaded_region->_region_index);
    _loading_failed = true;
    return false;
  }
  assert(r->mapped_base() == (char*)load_address, "sanity");
  log_info(cds)("Loaded heap    region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
                " size " SIZE_FORMAT_W(6) " delta " INTX_FORMAT,
                loaded_region->_region_index, load_address, load_address + loaded_region->_region_size,
                loaded_region->_region_size, loaded_region->_runtime_offset);

  // The oopmap (stored in the bitmap region) marks every embedded narrowOop
  // that needs relocation from its dump-time value.
  uintptr_t oopmap = bitmap_base + r->oopmap_offset();
  BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());

  PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region);
  bm.iterate(&patcher);
  return true;
}
320 
// Top-level entry for "loading" (copying) the archived heap region into a
// GC-allocated buffer, as opposed to mapping it directly from the archive.
// Returns true and sets _is_loaded on success.
bool ArchiveHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
  assert(UseCompressedOops, "loaded heap for !UseCompressedOops is unimplemented");
  // Decode embedded narrowOops using the encoding recorded in the archive.
  init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());

  LoadedArchiveHeapRegion loaded_region;
  memset(&loaded_region, 0, sizeof(loaded_region));

  MemRegion archive_space;
  if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) {
    return false;
  }

  if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) {
    assert(_loading_failed, "must be");
    return false;
  }

  init_loaded_heap_relocation(&loaded_region);
  _is_loaded = true;

  return true;
}
343 
344 class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
345   ResourceHashtable<uintptr_t, bool>* _table;
346 
347  public:
348   VerifyLoadedHeapEmbeddedPointers(ResourceHashtable<uintptr_t, bool>* table) : _table(table) {}
349 
350   virtual void do_oop(narrowOop* p) {
351     // This should be called before the loaded region is modified, so all the embedded pointers
352     // must be null, or must point to a valid object in the loaded region.
353     narrowOop v = *p;
354     if (!CompressedOops::is_null(v)) {
355       oop o = CompressedOops::decode_not_null(v);
356       uintptr_t u = cast_from_oop<uintptr_t>(o);
357       ArchiveHeapLoader::assert_in_loaded_heap(u);
358       guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
359     }
360   }
361   virtual void do_oop(oop* p) {
362     // Uncompressed oops are not supported by loaded heaps.
363     Unimplemented();
364   }
365 };
366 
// Final set-up after the archived heap region has been mapped or loaded:
// hand a loaded region to the GC (and optionally verify it), patch archived
// Metadata pointers, and register the heap root segments with HeapShared.
void ArchiveHeapLoader::finish_initialization() {
  if (is_loaded()) {
    // These operations are needed only when the heap is loaded (not mapped).
    finish_loaded_heap();
    if (VerifyArchivedFields > 0) {
      verify_loaded_heap();
    }
  }
  if (is_in_use()) {
    patch_native_pointers();
    intptr_t bottom = is_loaded() ? _loaded_heap_bottom : _mapped_heap_bottom;

    // The heap roots are stored in one or more segments that are laid out consecutively.
    // The size of each segment (except for the last one) is max_size_in_{elems,bytes}.
    HeapRootSegments segments = FileMapInfo::current_info()->heap_root_segments();
    HeapShared::init_root_segment_sizes(segments.max_size_in_elems());
    intptr_t first_segment_addr = bottom + segments.base_offset();
    for (size_t c = 0; c < segments.count(); c++) {
      // Segments are fixed-size, so segment c lives at a fixed byte offset.
      oop segment_oop = cast_to_oop(first_segment_addr + (c * segments.max_size_in_bytes()));
      assert(segment_oop->is_objArray(), "Must be");
      HeapShared::add_root_segment((objArrayOop)segment_oop);
    }
  }
}
391 
392 void ArchiveHeapLoader::finish_loaded_heap() {
393   HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
394   HeapWord* top    = (HeapWord*)_loaded_heap_top;
395 
396   MemRegion archive_space = MemRegion(bottom, top);
397   Universe::heap()->complete_loaded_archive_space(archive_space);
398 }
399 
400 void ArchiveHeapLoader::verify_loaded_heap() {
401   log_info(cds, heap)("Verify all oops and pointers in loaded heap");
402 
403   ResourceMark rm;
404   ResourceHashtable<uintptr_t, bool> table;
405   VerifyLoadedHeapEmbeddedPointers verifier(&table);
406   HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
407   HeapWord* top    = (HeapWord*)_loaded_heap_top;
408 
409   for (HeapWord* p = bottom; p < top; ) {
410     oop o = cast_to_oop(p);
411     table.put(cast_from_oop<uintptr_t>(o), true);
412     p += o->size();
413   }
414 
415   for (HeapWord* p = bottom; p < top; ) {
416     oop o = cast_to_oop(p);
417     o->oop_iterate(&verifier);
418     p += o->size();
419   }
420 }
421 
422 void ArchiveHeapLoader::fill_failed_loaded_heap() {
423   assert(_loading_failed, "must be");
424   if (_loaded_heap_bottom != 0) {
425     assert(_loaded_heap_top != 0, "must be");
426     HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
427     HeapWord* top = (HeapWord*)_loaded_heap_top;
428     Universe::heap()->fill_with_objects(bottom, top - bottom);
429   }
430 }
431 
432 oop ArchiveHeapLoader::oop_from_offset(int offset) {
433   // Once GC starts, the offsets saved in CachedCodeDirectoryInternal::_permanent_oop_offsets
434   // will become invalid. I don't know what function can check if GCs are allowed, but surely
435   // GCs can't happen before the Object class is loaded.
436   assert(CDSConfig::is_using_archive(), "sanity");
437   assert(vmClasses::Object_klass()->class_loader_data() == nullptr,
438          "can be called only very early during VM start-up");
439   if (is_loaded()) {
440     return cast_to_oop(_loaded_heap_bottom + offset);
441   } else {
442     assert(is_mapped(), "must be");
443     return cast_to_oop(_mapped_heap_bottom + offset);
444   }
445 }
446 
447 class PatchNativePointers: public BitMapClosure {
448   Metadata** _start;
449 
450  public:
451   PatchNativePointers(Metadata** start) : _start(start) {}
452 
453   bool do_bit(size_t offset) {
454     Metadata** p = _start + offset;
455     *p = (Metadata*)(address(*p) + MetaspaceShared::relocation_delta());
456     return true;
457   }
458 };
459 
460 void ArchiveHeapLoader::patch_native_pointers() {
461   if (MetaspaceShared::relocation_delta() == 0) {
462     return;
463   }
464 
465   FileMapRegion* r = FileMapInfo::current_info()->region_at(MetaspaceShared::hp);
466   if (r->mapped_base() != nullptr && r->has_ptrmap()) {
467     log_info(cds, heap)("Patching native pointers in heap region");
468     BitMapView bm = FileMapInfo::current_info()->ptrmap_view(MetaspaceShared::hp);
469     PatchNativePointers patcher((Metadata**)r->mapped_base() + FileMapInfo::current_info()->heap_ptrmap_start_pos());
470     bm.iterate(&patcher);
471   }
472 }
473 #endif // INCLUDE_CDS_JAVA_HEAP