1 /*
  2  * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "cds/archiveHeapLoader.inline.hpp"
 27 #include "cds/cdsConfig.hpp"
 28 #include "cds/heapShared.hpp"
 29 #include "cds/metaspaceShared.hpp"
 30 #include "classfile/classLoaderDataShared.hpp"
 31 #include "classfile/systemDictionaryShared.hpp"
 32 #include "gc/shared/collectedHeap.hpp"
 33 #include "logging/log.hpp"
 34 #include "memory/iterator.inline.hpp"
 35 #include "memory/resourceArea.hpp"
 36 #include "memory/universe.hpp"
 37 #include "utilities/bitMap.inline.hpp"
 38 #include "utilities/copy.hpp"
 39 
 40 #if INCLUDE_CDS_JAVA_HEAP
 41 
// True iff the archived heap region was successfully mmap'ed at runtime.
bool ArchiveHeapLoader::_is_mapped = false;
// True iff the archived heap region was copied ("loaded") into the runtime heap.
bool ArchiveHeapLoader::_is_loaded = false;

// Dump-time narrowOop encoding parameters (set once via init_narrow_oop_decoding()).
bool    ArchiveHeapLoader::_narrow_oop_base_initialized = false;
address ArchiveHeapLoader::_narrow_oop_base;
int     ArchiveHeapLoader::_narrow_oop_shift;

// Support for loaded heap.
uintptr_t ArchiveHeapLoader::_loaded_heap_bottom = 0;      // runtime [bottom, top) of the loaded objects
uintptr_t ArchiveHeapLoader::_loaded_heap_top = 0;
uintptr_t ArchiveHeapLoader::_dumptime_base = UINTPTR_MAX; // dump-time [base, top) of the same objects
uintptr_t ArchiveHeapLoader::_dumptime_top = 0;
intx ArchiveHeapLoader::_runtime_offset = 0;               // dump-time address + _runtime_offset == runtime address
bool ArchiveHeapLoader::_loading_failed = false;           // set on load failure; region is zero-filled later
                                                           // by fill_failed_loaded_heap()

// Support for mapped heap.
uintptr_t ArchiveHeapLoader::_mapped_heap_bottom = 0;
bool      ArchiveHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t ArchiveHeapLoader::_mapped_heap_delta = 0;       // actual mapped address - requested address
 61 
 62 // Every mapped region is offset by _mapped_heap_delta from its requested address.
 63 // See FileMapInfo::heap_region_requested_address().
 64 void ArchiveHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
 65   assert(!_mapped_heap_relocation_initialized, "only once");
 66   if (!UseCompressedOops) {
 67     assert(dumptime_oop_shift == 0, "sanity");
 68   }
 69   assert(can_map(), "sanity");
 70   init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
 71   _mapped_heap_bottom = (intptr_t)mapped_heap_bottom;
 72   _mapped_heap_delta = delta;
 73   _mapped_heap_relocation_initialized = true;
 74 }
 75 
 76 void ArchiveHeapLoader::init_narrow_oop_decoding(address base, int shift) {
 77   assert(!_narrow_oop_base_initialized, "only once");
 78   _narrow_oop_base_initialized = true;
 79   _narrow_oop_base = base;
 80   _narrow_oop_shift = shift;
 81 }
 82 
 83 void ArchiveHeapLoader::fixup_region() {
 84   FileMapInfo* mapinfo = FileMapInfo::current_info();
 85   if (is_mapped()) {
 86     mapinfo->fixup_mapped_heap_region();
 87   } else if (_loading_failed) {
 88     fill_failed_loaded_heap();
 89   }
 90   if (is_in_use()) {
 91     if (!CDSConfig::is_loading_full_module_graph()) {
 92       // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
 93       ClassLoaderDataShared::clear_archived_oops();
 94     }
 95   }
 96 }
 97 
 98 // ------------------ Support for Region MAPPING -----------------------------------------
 99 
100 // Patch all the embedded oop pointers inside an archived heap region,
101 // to be consistent with the runtime oop encoding.
102 class PatchCompressedEmbeddedPointers: public BitMapClosure {
103   narrowOop* _start;
104 
105  public:
106   PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}
107 
108   bool do_bit(size_t offset) {
109     narrowOop* p = _start + offset;
110     narrowOop v = *p;
111     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
112     oop o = ArchiveHeapLoader::decode_from_mapped_archive(v);
113     RawAccess<IS_NOT_NULL>::oop_store(p, o);
114     return true;
115   }
116 };
117 
118 class PatchCompressedEmbeddedPointersQuick: public BitMapClosure {
119   narrowOop* _start;
120   uint32_t _delta;
121 
122  public:
123   PatchCompressedEmbeddedPointersQuick(narrowOop* start, uint32_t delta) : _start(start), _delta(delta) {}
124 
125   bool do_bit(size_t offset) {
126     narrowOop* p = _start + offset;
127     narrowOop v = *p;
128     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
129     narrowOop new_v = CompressedOops::narrow_oop_cast(CompressedOops::narrow_oop_value(v) + _delta);
130     assert(!CompressedOops::is_null(new_v), "should never relocate to narrowOop(0)");
131 #ifdef ASSERT
132     oop o1 = ArchiveHeapLoader::decode_from_mapped_archive(v);
133     oop o2 = CompressedOops::decode_not_null(new_v);
134     assert(o1 == o2, "quick delta must work");
135 #endif
136     RawAccess<IS_NOT_NULL>::oop_store(p, new_v);
137     return true;
138   }
139 };
140 
141 class PatchUncompressedEmbeddedPointers: public BitMapClosure {
142   oop* _start;
143 
144  public:
145   PatchUncompressedEmbeddedPointers(oop* start) : _start(start) {}
146 
147   bool do_bit(size_t offset) {
148     oop* p = _start + offset;
149     intptr_t dumptime_oop = (intptr_t)((void*)*p);
150     assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
151     intptr_t runtime_oop = dumptime_oop + ArchiveHeapLoader::mapped_heap_delta();
152     RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
153     return true;
154   }
155 };
156 
157 void ArchiveHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
158                                                   FileMapInfo* info,
159                                                   MemRegion region) {
160   narrowOop dt_encoded_bottom = info->encoded_heap_region_dumptime_address();
161   narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start()));
162   log_info(cds)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x",
163                   (uint)dt_encoded_bottom, (uint)rt_encoded_bottom);
164 
165   // Optimization: if dumptime shift is the same as runtime shift, we can perform a
166   // quick conversion from "dumptime narrowOop" -> "runtime narrowOop".
167   if (_narrow_oop_shift == CompressedOops::shift()) {
168     uint32_t quick_delta = (uint32_t)rt_encoded_bottom - (uint32_t)dt_encoded_bottom;
169     log_info(cds)("CDS heap data relocation quick delta = 0x%x", quick_delta);
170     if (quick_delta == 0) {
171       log_info(cds)("CDS heap data relocation unnecessary, quick_delta = 0");
172     } else {
173       PatchCompressedEmbeddedPointersQuick patcher((narrowOop*)region.start(), quick_delta);
174       bm.iterate(&patcher);
175     }
176   } else {
177     log_info(cds)("CDS heap data quick relocation not possible");
178     PatchCompressedEmbeddedPointers patcher((narrowOop*)region.start());
179     bm.iterate(&patcher);
180   }
181 }
182 
183 // Patch all the non-null pointers that are embedded in the archived heap objects
184 // in this (mapped) region
185 void ArchiveHeapLoader::patch_embedded_pointers(FileMapInfo* info,
186                                                 MemRegion region, address oopmap,
187                                                 size_t oopmap_size_in_bits) {
188   BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
189 
190 #ifndef PRODUCT
191   ResourceMark rm;
192   ResourceBitMap checkBm = HeapShared::calculate_oopmap(region);
193   assert(bm.is_same(checkBm), "sanity");
194 #endif
195 
196   if (UseCompressedOops) {
197     patch_compressed_embedded_pointers(bm, info, region);
198   } else {
199     PatchUncompressedEmbeddedPointers patcher((oop*)region.start());
200     bm.iterate(&patcher);
201   }
202 }
203 
204 // ------------------ Support for Region LOADING -----------------------------------------
205 
// The CDS archive remembers each heap object by its address at dump time, but
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses. (POD: zero-initialized with memset by its user.)
struct LoadedArchiveHeapRegion {
  int       _region_index;   // index for FileMapInfo::space_at(index)
  size_t    _region_size;    // number of bytes in this region
  uintptr_t _dumptime_base;  // The dump-time (decoded) address of the first object in this region
  intx      _runtime_offset; // If an object's dump time address P is within in this region, its
                             // runtime address is P + _runtime_offset
  // One-past-the-end dump-time address of this region.
  uintptr_t top() {
    return _dumptime_base + _region_size;
  }
};
220 
221 void ArchiveHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) {
222   _dumptime_base = loaded_region->_dumptime_base;
223   _dumptime_top = loaded_region->top();
224   _runtime_offset = loaded_region->_runtime_offset;
225 }
226 
227 bool ArchiveHeapLoader::can_load() {
228   if (!UseCompressedOops) {
229     // Pointer relocation for uncompressed oops is unimplemented.
230     return false;
231   }
232   return Universe::heap()->can_load_archived_objects();
233 }
234 
235 class ArchiveHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
236   narrowOop* _start;
237   intx _offset;
238   uintptr_t _base;
239   uintptr_t _top;
240 
241  public:
242   PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region)
243     : _start(start),
244       _offset(loaded_region->_runtime_offset),
245       _base(loaded_region->_dumptime_base),
246       _top(loaded_region->top()) {}
247 
248   bool do_bit(size_t offset) {
249     assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
250     narrowOop* p = _start + offset;
251     narrowOop v = *p;
252     assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
253     uintptr_t o = cast_from_oop<uintptr_t>(ArchiveHeapLoader::decode_from_archive(v));
254     assert(_base <= o && o < _top, "must be");
255 
256     o += _offset;
257     ArchiveHeapLoader::assert_in_loaded_heap(o);
258     RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
259     return true;
260   }
261 };
262 
263 bool ArchiveHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
264                                            MemRegion& archive_space) {
265   size_t total_bytes = 0;
266   FileMapRegion* r = mapinfo->region_at(MetaspaceShared::hp);
267   r->assert_is_heap_region();
268   if (r->used() == 0) {
269     return false;
270   }
271 
272   assert(is_aligned(r->used(), HeapWordSize), "must be");
273   total_bytes += r->used();
274   loaded_region->_region_index = MetaspaceShared::hp;
275   loaded_region->_region_size = r->used();
276   loaded_region->_dumptime_base = (uintptr_t)mapinfo->heap_region_dumptime_address();
277 
278   assert(is_aligned(total_bytes, HeapWordSize), "must be");
279   size_t word_size = total_bytes / HeapWordSize;
280   HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
281   if (buffer == nullptr) {
282     return false;
283   }
284 
285   archive_space = MemRegion(buffer, word_size);
286   _loaded_heap_bottom = (uintptr_t)archive_space.start();
287   _loaded_heap_top    = _loaded_heap_bottom + total_bytes;
288 
289   loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base;
290 
291   return true;
292 }
293 
294 bool ArchiveHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
295                                               uintptr_t load_address) {
296   uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
297   if (bitmap_base == 0) {
298     _loading_failed = true;
299     return false; // OOM or CRC error
300   }
301 
302   FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index);
303   if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
304     // There's no easy way to free the buffer, so we will fill it with zero later
305     // in fill_failed_loaded_heap(), and it will eventually be GC'ed.
306     log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", loaded_region->_region_index);
307     _loading_failed = true;
308     return false;
309   }
310   assert(r->mapped_base() == (char*)load_address, "sanity");
311   log_info(cds)("Loaded heap    region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
312                 " size " SIZE_FORMAT_W(6) " delta " INTX_FORMAT,
313                 loaded_region->_region_index, load_address, load_address + loaded_region->_region_size,
314                 loaded_region->_region_size, loaded_region->_runtime_offset);
315 
316   uintptr_t oopmap = bitmap_base + r->oopmap_offset();
317   BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());
318 
319   PatchLoadedRegionPointers patcher((narrowOop*)load_address, loaded_region);
320   bm.iterate(&patcher);
321   return true;
322 }
323 
324 bool ArchiveHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
325   assert(UseCompressedOops, "loaded heap for !UseCompressedOops is unimplemented");
326   init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());
327 
328   LoadedArchiveHeapRegion loaded_region;
329   memset(&loaded_region, 0, sizeof(loaded_region));
330 
331   MemRegion archive_space;
332   if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) {
333     return false;
334   }
335 
336   if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) {
337     assert(_loading_failed, "must be");
338     return false;
339   }
340 
341   init_loaded_heap_relocation(&loaded_region);
342   _is_loaded = true;
343 
344   return true;
345 }
346 
347 class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
348   ResourceHashtable<uintptr_t, bool>* _table;
349 
350  public:
351   VerifyLoadedHeapEmbeddedPointers(ResourceHashtable<uintptr_t, bool>* table) : _table(table) {}
352 
353   virtual void do_oop(narrowOop* p) {
354     // This should be called before the loaded region is modified, so all the embedded pointers
355     // must be null, or must point to a valid object in the loaded region.
356     narrowOop v = *p;
357     if (!CompressedOops::is_null(v)) {
358       oop o = CompressedOops::decode_not_null(v);
359       uintptr_t u = cast_from_oop<uintptr_t>(o);
360       ArchiveHeapLoader::assert_in_loaded_heap(u);
361       guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
362     }
363   }
364   virtual void do_oop(oop* p) {
365     // Uncompressed oops are not supported by loaded heaps.
366     Unimplemented();
367   }
368 };
369 
370 void ArchiveHeapLoader::finish_initialization() {
371   if (is_loaded()) {
372     // These operations are needed only when the heap is loaded (not mapped).
373     finish_loaded_heap();
374     if (VerifyArchivedFields > 0) {
375       verify_loaded_heap();
376     }
377   }
378   if (is_in_use()) {
379     patch_native_pointers();
380     intptr_t bottom = is_loaded() ? _loaded_heap_bottom : _mapped_heap_bottom;
381     intptr_t roots_oop = bottom + FileMapInfo::current_info()->heap_roots_offset();
382     HeapShared::init_roots(cast_to_oop(roots_oop));
383   }
384 }
385 
386 void ArchiveHeapLoader::finish_loaded_heap() {
387   HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
388   HeapWord* top    = (HeapWord*)_loaded_heap_top;
389 
390   MemRegion archive_space = MemRegion(bottom, top);
391   Universe::heap()->complete_loaded_archive_space(archive_space);
392 }
393 
394 void ArchiveHeapLoader::verify_loaded_heap() {
395   log_info(cds, heap)("Verify all oops and pointers in loaded heap");
396 
397   ResourceMark rm;
398   ResourceHashtable<uintptr_t, bool> table;
399   VerifyLoadedHeapEmbeddedPointers verifier(&table);
400   HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
401   HeapWord* top    = (HeapWord*)_loaded_heap_top;
402 
403   for (HeapWord* p = bottom; p < top; ) {
404     oop o = cast_to_oop(p);
405     table.put(cast_from_oop<uintptr_t>(o), true);
406     p += o->size();
407   }
408 
409   for (HeapWord* p = bottom; p < top; ) {
410     oop o = cast_to_oop(p);
411     o->oop_iterate(&verifier);
412     p += o->size();
413   }
414 }
415 
416 void ArchiveHeapLoader::fill_failed_loaded_heap() {
417   assert(_loading_failed, "must be");
418   if (_loaded_heap_bottom != 0) {
419     assert(_loaded_heap_top != 0, "must be");
420     HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
421     HeapWord* top = (HeapWord*)_loaded_heap_top;
422     Universe::heap()->fill_with_objects(bottom, top - bottom);
423   }
424 }
425 
426 class PatchNativePointers: public BitMapClosure {
427   Metadata** _start;
428 
429  public:
430   PatchNativePointers(Metadata** start) : _start(start) {}
431 
432   bool do_bit(size_t offset) {
433     Metadata** p = _start + offset;
434     *p = (Metadata*)(address(*p) + MetaspaceShared::relocation_delta());
435     // Currently we have only Klass pointers in heap objects.
436     // This needs to be relaxed when we support other types of native
437     // pointers such as Method.
438     assert(((Klass*)(*p))->is_klass(), "must be");
439     return true;
440   }
441 };
442 
443 void ArchiveHeapLoader::patch_native_pointers() {
444   if (MetaspaceShared::relocation_delta() == 0) {
445     return;
446   }
447 
448   FileMapRegion* r = FileMapInfo::current_info()->region_at(MetaspaceShared::hp);
449   if (r->mapped_base() != nullptr && r->has_ptrmap()) {
450     log_info(cds, heap)("Patching native pointers in heap region");
451     BitMapView bm = r->ptrmap_view();
452     PatchNativePointers patcher((Metadata**)r->mapped_base());
453     bm.iterate(&patcher);
454   }
455 }
456 #endif // INCLUDE_CDS_JAVA_HEAP