1 /*
  2  * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "cds/archiveHeapLoader.inline.hpp"
 27 #include "cds/cdsConfig.hpp"
 28 #include "cds/heapShared.hpp"
 29 #include "cds/metaspaceShared.hpp"
 30 #include "classfile/classLoaderDataShared.hpp"
 31 #include "classfile/systemDictionaryShared.hpp"
 32 #include "gc/shared/collectedHeap.hpp"
 33 #include "logging/log.hpp"
 34 #include "memory/iterator.inline.hpp"
 35 #include "memory/resourceArea.hpp"
 36 #include "memory/universe.hpp"
 37 #include "utilities/bitMap.inline.hpp"
 38 #include "utilities/copy.hpp"
 39 
 40 #if INCLUDE_CDS_JAVA_HEAP
 41 
bool ArchiveHeapLoader::_is_mapped = false;  // heap region was mmap'ed directly from the archive
bool ArchiveHeapLoader::_is_loaded = false;  // heap region was read into a runtime-allocated heap buffer

// narrowOop encoding (base/shift) used by oops stored in the archive; set once
// via init_narrow_oop_decoding().
bool    ArchiveHeapLoader::_narrow_oop_base_initialized = false;
address ArchiveHeapLoader::_narrow_oop_base;
int     ArchiveHeapLoader::_narrow_oop_shift;

// Support for loaded heap.
uintptr_t ArchiveHeapLoader::_loaded_heap_bottom = 0;  // runtime address range of the loaded region
uintptr_t ArchiveHeapLoader::_loaded_heap_top = 0;
uintptr_t ArchiveHeapLoader::_dumptime_base = UINTPTR_MAX;  // dump-time address range of that region
uintptr_t ArchiveHeapLoader::_dumptime_top = 0;
intx ArchiveHeapLoader::_runtime_offset = 0;  // runtime address = dump-time address + _runtime_offset
bool ArchiveHeapLoader::_loading_failed = false;  // read_region/bitmap failure; see fill_failed_loaded_heap()

// Support for mapped heap.
uintptr_t ArchiveHeapLoader::_mapped_heap_bottom = 0;
bool      ArchiveHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t ArchiveHeapLoader::_mapped_heap_delta = 0;  // actual mapped address - requested address
 61 
 62 // Every mapped region is offset by _mapped_heap_delta from its requested address.
 63 // See FileMapInfo::heap_region_requested_address().
 64 void ArchiveHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
 65   assert(!_mapped_heap_relocation_initialized, "only once");
 66   if (!UseCompressedOops) {
 67     assert(dumptime_oop_shift == 0, "sanity");
 68   }
 69   assert(can_map(), "sanity");
 70   init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
 71   _mapped_heap_bottom = (intptr_t)mapped_heap_bottom;
 72   _mapped_heap_delta = delta;
 73   _mapped_heap_relocation_initialized = true;
 74 }
 75 
 76 void ArchiveHeapLoader::init_narrow_oop_decoding(address base, int shift) {
 77   assert(!_narrow_oop_base_initialized, "only once");
 78   _narrow_oop_base_initialized = true;
 79   _narrow_oop_base = base;
 80   _narrow_oop_shift = shift;
 81 }
 82 
 83 void ArchiveHeapLoader::fixup_region() {
 84   FileMapInfo* mapinfo = FileMapInfo::current_info();
 85   if (is_mapped()) {
 86     mapinfo->fixup_mapped_heap_region();
 87   } else if (_loading_failed) {
 88     fill_failed_loaded_heap();
 89   }
 90   if (is_in_use()) {
 91     if (!CDSConfig::is_using_full_module_graph()) {
 92       // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
 93       ClassLoaderDataShared::clear_archived_oops();
 94     }
 95   }
 96 }
 97 
 98 // ------------------ Support for Region MAPPING -----------------------------------------
 99 
100 // Patch all the embedded oop pointers inside an archived heap region,
101 // to be consistent with the runtime oop encoding.
102 class PatchCompressedEmbeddedPointers: public BitMapClosure {
103   narrowOop* _start;
104 
105  public:
106   PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}
107 
108   bool do_bit(size_t offset) {
109     narrowOop* p = _start + offset;
110     narrowOop v = *p;
111     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
112     oop o = ArchiveHeapLoader::decode_from_mapped_archive(v);
113     RawAccess<IS_NOT_NULL>::oop_store(p, o);
114     return true;
115   }
116 };
117 
118 class PatchCompressedEmbeddedPointersQuick: public BitMapClosure {
119   narrowOop* _start;
120   uint32_t _delta;
121 
122  public:
123   PatchCompressedEmbeddedPointersQuick(narrowOop* start, uint32_t delta) : _start(start), _delta(delta) {}
124 
125   bool do_bit(size_t offset) {
126     narrowOop* p = _start + offset;
127     narrowOop v = *p;
128     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
129     narrowOop new_v = CompressedOops::narrow_oop_cast(CompressedOops::narrow_oop_value(v) + _delta);
130     assert(!CompressedOops::is_null(new_v), "should never relocate to narrowOop(0)");
131 #ifdef ASSERT
132     oop o1 = ArchiveHeapLoader::decode_from_mapped_archive(v);
133     oop o2 = CompressedOops::decode_not_null(new_v);
134     assert(o1 == o2, "quick delta must work");
135 #endif
136     RawAccess<IS_NOT_NULL>::oop_store(p, new_v);
137     return true;
138   }
139 };
140 
141 class PatchUncompressedEmbeddedPointers: public BitMapClosure {
142   oop* _start;
143 
144  public:
145   PatchUncompressedEmbeddedPointers(oop* start) : _start(start) {}
146 
147   bool do_bit(size_t offset) {
148     oop* p = _start + offset;
149     intptr_t dumptime_oop = (intptr_t)((void*)*p);
150     assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
151     intptr_t runtime_oop = dumptime_oop + ArchiveHeapLoader::mapped_heap_delta();
152     RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
153     return true;
154   }
155 };
156 
157 void ArchiveHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
158                                                   FileMapInfo* info,
159                                                   MemRegion region) {
160   narrowOop dt_encoded_bottom = info->encoded_heap_region_dumptime_address();
161   narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start()));
162   log_info(cds)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x",
163                   (uint)dt_encoded_bottom, (uint)rt_encoded_bottom);
164 
165   // Optimization: if dumptime shift is the same as runtime shift, we can perform a
166   // quick conversion from "dumptime narrowOop" -> "runtime narrowOop".
167   narrowOop* patching_start = (narrowOop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos();
168   if (_narrow_oop_shift == CompressedOops::shift()) {
169     uint32_t quick_delta = (uint32_t)rt_encoded_bottom - (uint32_t)dt_encoded_bottom;
170     log_info(cds)("CDS heap data relocation quick delta = 0x%x", quick_delta);
171     if (quick_delta == 0) {
172       log_info(cds)("CDS heap data relocation unnecessary, quick_delta = 0");
173     } else {
174       PatchCompressedEmbeddedPointersQuick patcher(patching_start, quick_delta);
175       bm.iterate(&patcher);
176     }
177   } else {
178     log_info(cds)("CDS heap data quick relocation not possible");
179     PatchCompressedEmbeddedPointers patcher(patching_start);
180     bm.iterate(&patcher);
181   }
182 }
183 
184 // Patch all the non-null pointers that are embedded in the archived heap objects
185 // in this (mapped) region
186 void ArchiveHeapLoader::patch_embedded_pointers(FileMapInfo* info,
187                                                 MemRegion region, address oopmap,
188                                                 size_t oopmap_size_in_bits) {
189   BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
190   if (UseCompressedOops) {
191     patch_compressed_embedded_pointers(bm, info, region);
192   } else {
193     PatchUncompressedEmbeddedPointers patcher((oop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos());
194     bm.iterate(&patcher);
195   }
196 }
197 
198 // ------------------ Support for Region LOADING -----------------------------------------
199 
200 // The CDS archive remembers each heap object by its address at dump time, but
201 // the heap object may be loaded at a different address at run time. This structure is used
202 // to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
203 // to their runtime addresses.
struct LoadedArchiveHeapRegion {
  int       _region_index;   // index for FileMapInfo::space_at(index)
  size_t    _region_size;    // number of bytes in this region
  uintptr_t _dumptime_base;  // The dump-time (decoded) address of the first object in this region
  intx      _runtime_offset; // If an object's dump time address P is within in this region, its
                             // runtime address is P + _runtime_offset

  // One-past-the-end dump-time address of this region.
  uintptr_t top() {
    return _dumptime_base + _region_size;
  }
};
214 
215 void ArchiveHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) {
216   _dumptime_base = loaded_region->_dumptime_base;
217   _dumptime_top = loaded_region->top();
218   _runtime_offset = loaded_region->_runtime_offset;
219 }
220 
221 bool ArchiveHeapLoader::can_load() {
222   if (!UseCompressedOops) {
223     // Pointer relocation for uncompressed oops is unimplemented.
224     return false;
225   }
226   return Universe::heap()->can_load_archived_objects();
227 }
228 
229 class ArchiveHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
230   narrowOop* _start;
231   intx _offset;
232   uintptr_t _base;
233   uintptr_t _top;
234 
235  public:
236   PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region)
237     : _start(start),
238       _offset(loaded_region->_runtime_offset),
239       _base(loaded_region->_dumptime_base),
240       _top(loaded_region->top()) {}
241 
242   bool do_bit(size_t offset) {
243     assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
244     narrowOop* p = _start + offset;
245     narrowOop v = *p;
246     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
247     uintptr_t o = cast_from_oop<uintptr_t>(ArchiveHeapLoader::decode_from_archive(v));
248     assert(_base <= o && o < _top, "must be");
249 
250     o += _offset;
251     ArchiveHeapLoader::assert_in_loaded_heap(o);
252     RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
253     return true;
254   }
255 };
256 
257 bool ArchiveHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
258                                            MemRegion& archive_space) {
259   size_t total_bytes = 0;
260   FileMapRegion* r = mapinfo->region_at(MetaspaceShared::hp);
261   r->assert_is_heap_region();
262   if (r->used() == 0) {
263     return false;
264   }
265 
266   assert(is_aligned(r->used(), HeapWordSize), "must be");
267   total_bytes += r->used();
268   loaded_region->_region_index = MetaspaceShared::hp;
269   loaded_region->_region_size = r->used();
270   loaded_region->_dumptime_base = (uintptr_t)mapinfo->heap_region_dumptime_address();
271 
272   assert(is_aligned(total_bytes, HeapWordSize), "must be");
273   size_t word_size = total_bytes / HeapWordSize;
274   HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
275   if (buffer == nullptr) {
276     return false;
277   }
278 
279   archive_space = MemRegion(buffer, word_size);
280   _loaded_heap_bottom = (uintptr_t)archive_space.start();
281   _loaded_heap_top    = _loaded_heap_bottom + total_bytes;
282 
283   loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base;
284 
285   return true;
286 }
287 
// Read the heap region from the archive file into the pre-allocated buffer at
// load_address, then relocate all embedded narrowOop pointers using the
// region's oopmap. Sets _loading_failed and returns false on any error.
bool ArchiveHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
                                              uintptr_t load_address) {
  // The bitmap region holds the oopmap that locates embedded pointers.
  uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
  if (bitmap_base == 0) {
    _loading_failed = true;
    return false; // OOM or CRC error
  }

  FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index);
  if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
    // There's no easy way to free the buffer, so we will fill it with zero later
    // in fill_failed_loaded_heap(), and it will eventually be GC'ed.
    log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", loaded_region->_region_index);
    _loading_failed = true;
    return false;
  }
  assert(r->mapped_base() == (char*)load_address, "sanity");
  log_info(cds)("Loaded heap    region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
                " size " SIZE_FORMAT_W(6) " delta " INTX_FORMAT,
                loaded_region->_region_index, load_address, load_address + loaded_region->_region_size,
                loaded_region->_region_size, loaded_region->_runtime_offset);

  uintptr_t oopmap = bitmap_base + r->oopmap_offset();
  BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());

  // heap_oopmap_start_pos() offsets the patching start within the buffer,
  // mirroring the mapped-region patching above.
  PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region);
  bm.iterate(&patcher);
  return true;
}
317 
318 bool ArchiveHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
319   assert(UseCompressedOops, "loaded heap for !UseCompressedOops is unimplemented");
320   init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());
321 
322   LoadedArchiveHeapRegion loaded_region;
323   memset(&loaded_region, 0, sizeof(loaded_region));
324 
325   MemRegion archive_space;
326   if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) {
327     return false;
328   }
329 
330   if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) {
331     assert(_loading_failed, "must be");
332     return false;
333   }
334 
335   init_loaded_heap_relocation(&loaded_region);
336   _is_loaded = true;
337 
338   return true;
339 }
340 
341 class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
342   ResourceHashtable<uintptr_t, bool>* _table;
343 
344  public:
345   VerifyLoadedHeapEmbeddedPointers(ResourceHashtable<uintptr_t, bool>* table) : _table(table) {}
346 
347   virtual void do_oop(narrowOop* p) {
348     // This should be called before the loaded region is modified, so all the embedded pointers
349     // must be null, or must point to a valid object in the loaded region.
350     narrowOop v = *p;
351     if (!CompressedOops::is_null(v)) {
352       oop o = CompressedOops::decode_not_null(v);
353       uintptr_t u = cast_from_oop<uintptr_t>(o);
354       ArchiveHeapLoader::assert_in_loaded_heap(u);
355       guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
356     }
357   }
358   virtual void do_oop(oop* p) {
359     // Uncompressed oops are not supported by loaded heaps.
360     Unimplemented();
361   }
362 };
363 
// Final initialization step once the heap region is mapped or loaded:
// complete (and optionally verify) a loaded region, patch embedded Metadata*
// pointers, and install the archived roots segment into HeapShared.
void ArchiveHeapLoader::finish_initialization() {
  if (is_loaded()) {
    // These operations are needed only when the heap is loaded (not mapped).
    finish_loaded_heap();
    if (VerifyArchivedFields > 0) {
      verify_loaded_heap();
    }
  }
  if (is_in_use()) {
    patch_native_pointers();
    // The roots object lives at a fixed offset from the region's bottom,
    // regardless of whether the region was loaded or mapped.
    intptr_t bottom = is_loaded() ? _loaded_heap_bottom : _mapped_heap_bottom;
    intptr_t roots_oop = bottom + FileMapInfo::current_info()->heap_roots_offset();
    HeapShared::init_roots(cast_to_oop(roots_oop));
  }
}
379 
380 void ArchiveHeapLoader::finish_loaded_heap() {
381   HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
382   HeapWord* top    = (HeapWord*)_loaded_heap_top;
383 
384   MemRegion archive_space = MemRegion(bottom, top);
385   Universe::heap()->complete_loaded_archive_space(archive_space);
386 }
387 
388 void ArchiveHeapLoader::verify_loaded_heap() {
389   log_info(cds, heap)("Verify all oops and pointers in loaded heap");
390 
391   ResourceMark rm;
392   ResourceHashtable<uintptr_t, bool> table;
393   VerifyLoadedHeapEmbeddedPointers verifier(&table);
394   HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
395   HeapWord* top    = (HeapWord*)_loaded_heap_top;
396 
397   for (HeapWord* p = bottom; p < top; ) {
398     oop o = cast_to_oop(p);
399     table.put(cast_from_oop<uintptr_t>(o), true);
400     p += o->size();
401   }
402 
403   for (HeapWord* p = bottom; p < top; ) {
404     oop o = cast_to_oop(p);
405     o->oop_iterate(&verifier);
406     p += o->size();
407   }
408 }
409 
410 void ArchiveHeapLoader::fill_failed_loaded_heap() {
411   assert(_loading_failed, "must be");
412   if (_loaded_heap_bottom != 0) {
413     assert(_loaded_heap_top != 0, "must be");
414     HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
415     HeapWord* top = (HeapWord*)_loaded_heap_top;
416     Universe::heap()->fill_with_objects(bottom, top - bottom);
417   }
418 }
419 
420 class PatchNativePointers: public BitMapClosure {
421   Metadata** _start;
422 
423  public:
424   PatchNativePointers(Metadata** start) : _start(start) {}
425 
426   bool do_bit(size_t offset) {
427     Metadata** p = _start + offset;
428     *p = (Metadata*)(address(*p) + MetaspaceShared::relocation_delta());
429     return true;
430   }
431 };
432 
433 void ArchiveHeapLoader::patch_native_pointers() {
434   if (MetaspaceShared::relocation_delta() == 0) {
435     return;
436   }
437 
438   FileMapRegion* r = FileMapInfo::current_info()->region_at(MetaspaceShared::hp);
439   if (r->mapped_base() != nullptr && r->has_ptrmap()) {
440     log_info(cds, heap)("Patching native pointers in heap region");
441     BitMapView bm = FileMapInfo::current_info()->ptrmap_view(MetaspaceShared::hp);
442     PatchNativePointers patcher((Metadata**)r->mapped_base() + FileMapInfo::current_info()->heap_ptrmap_start_pos());
443     bm.iterate(&patcher);
444   }
445 }
446 #endif // INCLUDE_CDS_JAVA_HEAP