/*
 * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"

#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/heapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t ArchiveHeapWriter::_buffer_used;
size_t ArchiveHeapWriter::_heap_roots_offset;

size_t ArchiveHeapWriter::_heap_roots_word_size;

address ArchiveHeapWriter::_requested_bottom;
address ArchiveHeapWriter::_requested_top;

GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;

ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
  ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;

typedef ResourceHashtable<address, size_t,
      127, // prime number
      AnyObj::C_HEAP,
      mtClassShared> FillersTable;
static FillersTable* _fillers;

void ArchiveHeapWriter::init() {
  if (HeapShared::can_write()) {
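    // Run a full GC first so that dead objects are collected before we start
    // copying the archivable objects.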
    Universe::heap()->collect(GCCause::_java_lang_system_gc);

    _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable();
    _fillers = new FillersTable();
    _requested_bottom = nullptr;
    _requested_top = nullptr;

    _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);

    guarantee(UseG1GC, "implementation limitation");
    guarantee(MIN_GC_REGION_ALIGNMENT <= /*G1*/HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
  }
}

void ArchiveHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

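// Entry point for writing the archived heap image: copy all registered source
// objects (and the roots array) into the buffer, pick the requested runtime
// address range, then relocate all embedded oops to their requested addresses.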
void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                              ArchiveHeapInfo* heap_info) {
  assert(HeapShared::can_write(), "sanity");
  allocate_buffer();
  copy_source_objs_to_buffer(roots);
  set_requested_address(heap_info);
  relocate_embedded_oops(roots, heap_info);
}

bool ArchiveHeapWriter::is_too_large_to_archive(oop o) {
  return is_too_large_to_archive(o->size());
}

bool ArchiveHeapWriter::is_string_too_large_to_archive(oop string) {
  typeArrayOop value = java_lang_String::value_no_keepalive(string);
  return is_too_large_to_archive(value);
}

bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) {
  assert(size > 0, "no zero-size object");
  assert(size * HeapWordSize > size, "no overflow");
  static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");

  size_t byte_size = size * HeapWordSize;
  return byte_size > size_t(MIN_GC_REGION_ALIGNMENT);
}

// Various lookup functions between source_obj, buffered_obj and requested_obj
bool ArchiveHeapWriter::is_in_requested_range(oop o) {
  assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
  address a = cast_from_oop<address>(o);
  return (_requested_bottom <= a && a < _requested_top);
}

oop ArchiveHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
  oop req_obj = cast_to_oop(_requested_bottom + offset);
  assert(is_in_requested_range(req_obj), "must be");
  return req_obj;
}

oop ArchiveHeapWriter::source_obj_to_requested_obj(oop src_obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
  if (p != nullptr) {
    return requested_obj_from_buffer_offset(p->buffer_offset());
  } else {
    return nullptr;
  }
}

oop ArchiveHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
  oop* p = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    return *p;
  } else {
    return nullptr;
  }
}

address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
  return _requested_bottom + buffered_address_to_offset(buffered_addr);
}

oop ArchiveHeapWriter::heap_roots_requested_address() {
  return cast_to_oop(_requested_bottom + _heap_roots_offset);
}

address ArchiveHeapWriter::requested_address() {
  assert(_buffer != nullptr, "must be initialized");
  return _requested_bottom;
}

void ArchiveHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

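// Write HeapShared::roots() into the buffer as a single objArray whose elements
// are the root oops. The array header and elements are written by hand, because
// the buffer lies outside the real Java heap.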
void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  int length = roots->length();
  _heap_roots_word_size = objArrayOopDesc::object_size(length);
  size_t byte_size = _heap_roots_word_size * HeapWordSize;
  if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
    log_error(cds, heap)("roots array is too large. Please reduce the number of classes");
    vm_exit(1);
  }

  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  ensure_buffer_space(new_used);

  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, byte_size);
  {
    // This is copied from MemAllocator::finish
    if (UseCompactObjectHeaders) {
      narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(k);
      oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
      oopDesc::release_set_klass(mem, k);
    }
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  objArrayOop arrayOop = objArrayOop(cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside of the real heap!
    oop o = roots->at(i);
    if (UseCompressedOops) {
      *arrayOop->obj_at_addr<narrowOop>(i) = CompressedOops::encode(o);
    } else {
      *arrayOop->obj_at_addr<oop>(i) = o;
    }
  }
  log_info(cds, heap)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);

  _heap_roots_offset = _buffer_used;
  _buffer_used = new_used;
}

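// Copy every registered source object into the buffer, recording the mapping
// between buffer offsets and source objects, and finally append the roots array.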
void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  for (int i = 0; i < _source_objs->length(); i++) {
    oop src_obj = _source_objs->at(i);
    HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);

    _buffer_offset_to_source_obj_table->put(buffer_offset, src_obj);
  }

  copy_roots_to_buffer(roots);

  log_info(cds)("Size of heap region = " SIZE_FORMAT " bytes, %d objects, %d roots",
                _buffer_used, _source_objs->length() + 1, roots->length());
}

size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}

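// Find the array length for which an objArray filler occupies exactly fill_bytes.
// A matching length is expected to exist (see maybe_fill_gc_region_gap(), which
// never leaves a gap smaller than a zero-length array), hence the
// ShouldNotReachHere() below.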
int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);

  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
  return mem;
}

void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space of min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
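  //
  // Example (hypothetical numbers): with MIN_GC_REGION_ALIGNMENT = 1M,
  // _buffer_used = 0xff000 and required_byte_size = 0x2000, the new object would
  // cross the 1M boundary, so we write a 0x1000-byte filler array at offset
  // 0xff000 and the new object starts exactly at the boundary (offset 0x100000).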
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
    // we can map the region in any region-based collector.
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
           "no buffered object can be larger than %d bytes", MIN_GC_REGION_ALIGNMENT);

    const size_t filler_end = next_min_region_bottom;
    const size_t fill_bytes = filler_end - _buffer_used;
    assert(fill_bytes > 0, "must be");
    ensure_buffer_space(filler_end);

    int array_length = filler_array_length(fill_bytes);
    log_info(cds, heap)("Inserting filler obj array of %d elements (" SIZE_FORMAT " bytes total) @ buffer offset " SIZE_FORMAT,
                        array_length, fill_bytes, _buffer_used);
    HeapWord* filler = init_filler_array_at_buffer_top(array_length, fill_bytes);
    _buffer_used = filler_end;
    _fillers->put((address)filler, fill_bytes);
  }
}

size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_addr);
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

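// Copy src_obj into the buffer (first inserting a filler if the object would
// otherwise span a MIN_GC_REGION_ALIGNMENT boundary) and return its buffer offset.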
size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  assert(!is_too_large_to_archive(src_obj), "already checked");
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that we don't have an object that could
  // possibly span across two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, byte_size);

  // These native pointers will be restored explicitly at run time.
  if (java_lang_Module::is_instance(src_obj)) {
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

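// Choose the "requested" address range at which the archived heap is expected
// to be mapped at run time, and record the buffer region and roots offset in 'info'.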
void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) {
  assert(!info->is_used(), "only set once");
  assert(UseG1GC, "must be");
  address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
  log_info(cds, heap)("Heap end = %p", heap_end);

  size_t heap_region_byte_size = _buffer_used;
  assert(heap_region_byte_size > 0, "must archive at least one object!");

  if (UseCompressedOops) {
    _requested_bottom = align_down(heap_end - heap_region_byte_size, HeapRegion::GrainBytes);
  } else {
    // We always write the objects as if the heap started at this address. This
    // makes the contents of the archive heap deterministic.
    //
    // Note that at runtime, the heap address is selected by the OS, so the archive
    // heap will not be mapped at 0x10000000, and the contents need to be patched.
    _requested_bottom = (address)NOCOOPS_REQUESTED_BASE;
  }

  assert(is_aligned(_requested_bottom, HeapRegion::GrainBytes), "sanity");

  _requested_top = _requested_bottom + _buffer_used;

  info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
                                    offset_to_buffered_address<HeapWord*>(_buffer_used)));
  info->set_heap_roots_offset(_heap_roots_offset);
}

// Oop relocation

template <typename T> T* ArchiveHeapWriter::requested_addr_to_buffered_addr(T* p) {
  assert(is_in_requested_range(cast_to_oop(p)), "must be");

  address addr = address(p);
  assert(addr >= _requested_bottom, "must be");
  size_t offset = addr - _requested_bottom;
  return offset_to_buffered_address<T*>(offset);
}

template <typename T> oop ArchiveHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
  oop o = load_oop_from_buffer(buffered_addr);
  assert(!in_buffer(cast_from_oop<address>(o)), "must point to source oop");
  return o;
}

template <typename T> void ArchiveHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
                                                                            oop request_oop) {
  assert(is_in_requested_range(request_oop), "must be");
  store_oop_in_buffer(buffered_addr, request_oop);
}

inline void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
  *buffered_addr = requested_obj;
}

inline void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
  narrowOop val = CompressedOops::encode_not_null(requested_obj);
  *buffered_addr = val;
}

oop ArchiveHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
  return *buffered_addr;
}

oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
  return CompressedOops::decode(*buffered_addr);
}

template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap) {
  oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
  if (!CompressedOops::is_null(source_referent)) {
    oop request_referent = source_obj_to_requested_obj(source_referent);
    store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
    mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
  }
}

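// Record in the oopmap the location of an oop field that must be patched at run
// time. Bit indices are in units of narrowOop/oop, relative to the requested
// bottom of the heap region.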
template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
  address requested_region_bottom;

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p <  (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

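// Write a valid object header (mark word and narrow klass) into the buffered
// copy of the object, preserving the source object's identity hash.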
void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (!UseCompactObjectHeaders) {
    fake_oop->set_narrow_klass(nk);
  }

  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap. This also has the side effect of pre-initializing the
  // identity_hash for all shared objects, so they are less likely to be written
  // into during run time, increasing the potential of memory sharing.
  if (src_obj != nullptr) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
}

// Relocate an element in the buffered copy of HeapShared::roots()
template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap) {
  size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
  relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset), oopmap);
}

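// Iterates over all oop fields of a single source object and relocates the
// corresponding fields in its buffered copy.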
class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;

public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
    _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap) {}

  void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }

private:
  template <class T> void do_oop_work(T *p) {
    size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
    ArchiveHeapWriter::relocate_field_in_buffer<T>((T*)(_buffered_obj + field_offset), _oopmap);
  }
};

// Update all oop fields embedded in the buffered objects
void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                               ArchiveHeapInfo* heap_info) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);

  auto iterator = [&] (oop src_obj, HeapShared::CachedOopInfo& info) {
    oop requested_obj = requested_obj_from_buffer_offset(info.buffer_offset());
    update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
    address buffered_obj = offset_to_buffered_address<address>(info.buffer_offset());
    EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
    src_obj->oop_iterate(&relocator);
  };
  HeapShared::archived_object_cache()->iterate_all(iterator);

  // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
  // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
  oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_offset);
  update_header_for_requested_obj(requested_roots, nullptr, Universe::objectArrayKlassObj());
  int length = roots != nullptr ? roots->length() : 0;
  for (int i = 0; i < length; i++) {
    if (UseCompressedOops) {
      relocate_root_at<narrowOop>(requested_roots, i, heap_info->oopmap());
    } else {
      relocate_root_at<oop>(requested_roots, i, heap_info->oopmap());
    }
  }

  compute_ptrmap(heap_info);
}

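// Remember a non-null Metadata* field of src_obj. compute_ptrmap() will later
// patch the buffered copy of this field and record it in the ptrmap.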
void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
  Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
  if (ptr != nullptr) {
    NativePointerInfo info;
    info._src_obj = src_obj;
    info._field_offset = field_offset;
    _native_pointers->append(info);
  }
}

// Do we have a jlong/jint field that's actually a pointer to a MetaspaceObj?
bool ArchiveHeapWriter::is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset) {
  HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
  assert(p != nullptr, "must be");

  // requested_field_addr = the address of this field in the requested space
  oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
  Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
  assert((Metadata**)_requested_bottom <= requested_field_addr && requested_field_addr < (Metadata**)_requested_top, "range check");

  BitMap::idx_t idx = requested_field_addr - (Metadata**)_requested_bottom;
  return (idx < heap_info->ptrmap()->size()) && heap_info->ptrmap()->at(idx);
}

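// For each field recorded by mark_native_pointer(): set the corresponding bit in
// the ptrmap, and patch the buffered field so that it points to the requested
// (run-time) address of its metadata.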
void ArchiveHeapWriter::compute_ptrmap(ArchiveHeapInfo* heap_info) {
  int num_non_null_ptrs = 0;
  Metadata** bottom = (Metadata**)_requested_bottom;
  Metadata** top = (Metadata**)_requested_top; // exclusive
  heap_info->ptrmap()->resize(top - bottom);

  BitMap::idx_t max_idx = 32; // paranoid - don't make it too small
  for (int i = 0; i < _native_pointers->length(); i++) {
    NativePointerInfo info = _native_pointers->at(i);
    oop src_obj = info._src_obj;
    int field_offset = info._field_offset;
    HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
    // requested_field_addr = the address of this field in the requested space
    oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
    Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
    assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");

    // Mark this field in the bitmap
    BitMap::idx_t idx = requested_field_addr - bottom;
    heap_info->ptrmap()->set_bit(idx);
    num_non_null_ptrs++;
    max_idx = MAX2(max_idx, idx);

    // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
    // this address if the RO/RW regions are mapped at the default location).
    Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
    Metadata* native_ptr = *buffered_field_addr;
    assert(native_ptr != nullptr, "sanity");

    address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
    address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
    *buffered_field_addr = (Metadata*)requested_native_ptr;
  }

  heap_info->ptrmap()->resize(max_idx + 1);
  log_info(cds, heap)("compute_ptrmap: marked %d non-null native pointers for heap region (" SIZE_FORMAT " bits)",
                      num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}

#endif // INCLUDE_CDS_JAVA_HEAP