212 address ArchiveHeapWriter::requested_address() {
213 assert(_buffer != nullptr, "must be initialized");
214 return _requested_bottom;
215 }
216
217 void ArchiveHeapWriter::allocate_buffer() {
218 int initial_buffer_size = 100000;
219 _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
220 _buffer_used = 0;
221 ensure_buffer_space(1); // so that buffer_bottom() works
222 }
223
224 void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
225 // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
226 guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
227 _buffer->at_grow(to_array_index(min_bytes));
228 }
229
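// Lay out the header (mark word, klass and length) of one root-segment array at the given
// buffer offset; the element slots are filled in afterwards via root_segment_at_put().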
230 objArrayOop ArchiveHeapWriter::allocate_root_segment(size_t offset, int element_count) {
231 HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
232 memset(mem, 0, objArrayOopDesc::object_size(element_count));
233
234 // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
235 if (UseCompactObjectHeaders) {
236 oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
237 } else {
238 oopDesc::set_mark(mem, markWord::prototype());
239 oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
240 }
241 arrayOopDesc::set_length(mem, element_count);
242 return objArrayOop(cast_to_oop(mem));
243 }
244
245 void ArchiveHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
246 // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
247 if (UseCompressedOops) {
248 *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
249 } else {
250 *segment->obj_at_addr<oop>(index) = root;
251 }
252 }
253
254 void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
255 // Depending on the number of classes we are archiving, a single roots array may be
256 // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
257 // allows us to chop the large array into a series of "segments". Current layout
258 // starts with zero or more segments exactly fitting MIN_GC_REGION_ALIGNMENT, and ends
259 // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
260 // This is simple and efficient. We do not need filler objects anywhere between the segments,
261 // or immediately after the last segment. This allows starting the object dump immediately
262 // after the roots.
263
264 assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
265 "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);
266
267 int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
268 assert(objArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
269 "Should match exactly");
270
271 HeapRootSegments segments(_buffer_used,
272 roots->length(),
273 MIN_GC_REGION_ALIGNMENT,
274 max_elem_count);
275
276 int root_index = 0;
277 for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
278 int size_elems = segments.size_in_elems(seg_idx);
279 size_t size_bytes = segments.size_in_bytes(seg_idx);
280
281 size_t oop_offset = _buffer_used;
282 _buffer_used = oop_offset + size_bytes;
283 ensure_buffer_space(_buffer_used);
284
285 assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
286 "Roots segment %zu start is not aligned: %zu",
287 segments.count(), oop_offset);
288
363 oop src_obj = _source_objs->at(src_obj_index);
364 HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
365 assert(info != nullptr, "must be");
366 size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
367 info->set_buffer_offset(buffer_offset);
368
369 OopHandle handle(Universe::vm_global(), src_obj);
370 _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
371 _buffer_offset_to_source_obj_table->maybe_grow();
372
373 if (java_lang_Module::is_instance(src_obj)) {
374 Modules::check_archived_module_oop(src_obj);
375 }
376 }
377
378 log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
379 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
380 }
381
382 size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
383 size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
384 return byte_size;
385 }
386
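// Find the array length whose filler objArray occupies exactly fill_bytes. The initial
// estimate (fill_bytes / element size) ignores the array header, so count down until the
// computed object size matches fill_bytes exactly.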
387 int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
388 assert(is_object_aligned(fill_bytes), "must be");
389 size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
390
391 int initial_length = to_array_length(fill_bytes / elemSize);
392 for (int length = initial_length; length >= 0; length --) {
393 size_t array_byte_size = filler_array_byte_size(length);
394 if (array_byte_size == fill_bytes) {
395 return length;
396 }
397 }
398
399 ShouldNotReachHere();
400 return -1;
401 }
402
403 HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
620 oopmap->set_bit(idx);
621 }
622
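// Patch the header of the buffered copy of requested_obj: install the requested-space
// narrow klass, carry over the source object's identity hash when one is present, and
// clear the age bits.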
623 void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
624 assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
625 narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
626 address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
627
628 oop fake_oop = cast_to_oop(buffered_addr);
629 if (UseCompactObjectHeaders) {
630 fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
631 } else {
632 fake_oop->set_narrow_klass(nk);
633 }
634
635 if (src_obj == nullptr) {
636 return;
637 }
638 // We need to retain the identity_hash, because it may have been used by some hashtables
639 // in the shared heap.
640 if (!src_obj->fast_no_hash_check()) {
641 intptr_t src_hash = src_obj->identity_hash();
642 if (UseCompactObjectHeaders) {
643 fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
644 } else {
645 fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
646 }
647 assert(fake_oop->mark().is_unlocked(), "sanity");
648
649 DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
650 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
651 }
652 // Strip age bits.
653 fake_oop->set_mark(fake_oop->mark().set_age(0));
654 }
655
656 class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
657 oop _src_obj;
658 address _buffered_obj;
659 CHeapBitMap* _oopmap;
660 bool _is_java_lang_ref;
661 public:
662 EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
663 _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
212 address ArchiveHeapWriter::requested_address() {
213 assert(_buffer != nullptr, "must be initialized");
214 return _requested_bottom;
215 }
216
217 void ArchiveHeapWriter::allocate_buffer() {
218 int initial_buffer_size = 100000;
219 _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
220 _buffer_used = 0;
221 ensure_buffer_space(1); // so that buffer_bottom() works
222 }
223
224 void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
225 // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
226 guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
227 _buffer->at_grow(to_array_index(min_bytes));
228 }
229
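// Lay out the header (mark word, klass and length) of one root-segment array at the given
// buffer offset; the element slots are filled in afterwards via root_segment_at_put().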
230 objArrayOop ArchiveHeapWriter::allocate_root_segment(size_t offset, int element_count) {
231 HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
232 memset(mem, 0, refArrayOopDesc::object_size(element_count));
233
234 // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
235 if (UseCompactObjectHeaders) {
236 oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
237 } else {
238 assert(!EnableValhalla || Universe::objectArrayKlass()->prototype_header() == markWord::prototype(), "should be the same");
239 oopDesc::set_mark(mem, markWord::prototype());
240 oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
241 }
242 arrayOopDesc::set_length(mem, element_count);
243 return objArrayOop(cast_to_oop(mem));
244 }
245
246 void ArchiveHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
247 // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
248 if (UseCompressedOops) {
249 *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
250 } else {
251 *segment->obj_at_addr<oop>(index) = root;
252 }
253 }
254
255 void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
256 // Depending on the number of classes we are archiving, a single roots array may be
257 // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
258 // allows us to chop the large array into a series of "segments". Current layout
259 // starts with zero or more segments exactly fitting MIN_GC_REGION_ALIGNMENT, and ends
260 // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
261 // This is simple and efficient. We do not need filler objects anywhere between the segments,
262 // or immediately after the last segment. This allows starting the object dump immediately
263 // after the roots.
264
265 assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
266 "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);
267
268 int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
269 assert(refArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
270 "Should match exactly");
271
272 HeapRootSegments segments(_buffer_used,
273 roots->length(),
274 MIN_GC_REGION_ALIGNMENT,
275 max_elem_count);
276
277 int root_index = 0;
278 for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
279 int size_elems = segments.size_in_elems(seg_idx);
280 size_t size_bytes = segments.size_in_bytes(seg_idx);
281
282 size_t oop_offset = _buffer_used;
283 _buffer_used = oop_offset + size_bytes;
284 ensure_buffer_space(_buffer_used);
285
286 assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
287 "Roots segment %zu start is not aligned: %zu",
288 segments.count(), oop_offset);
289
364 oop src_obj = _source_objs->at(src_obj_index);
365 HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
366 assert(info != nullptr, "must be");
367 size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
368 info->set_buffer_offset(buffer_offset);
369
370 OopHandle handle(Universe::vm_global(), src_obj);
371 _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
372 _buffer_offset_to_source_obj_table->maybe_grow();
373
374 if (java_lang_Module::is_instance(src_obj)) {
375 Modules::check_archived_module_oop(src_obj);
376 }
377 }
378
379 log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
380 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
381 }
382
383 size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
384 size_t byte_size = refArrayOopDesc::object_size(length) * HeapWordSize;
385 return byte_size;
386 }
387
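// Find the array length whose filler objArray occupies exactly fill_bytes. The initial
// estimate (fill_bytes / element size) ignores the array header, so count down until the
// computed object size matches fill_bytes exactly.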
388 int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
389 assert(is_object_aligned(fill_bytes), "must be");
390 size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
391
392 int initial_length = to_array_length(fill_bytes / elemSize);
393 for (int length = initial_length; length >= 0; length --) {
394 size_t array_byte_size = filler_array_byte_size(length);
395 if (array_byte_size == fill_bytes) {
396 return length;
397 }
398 }
399
400 ShouldNotReachHere();
401 return -1;
402 }
403
404 HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
621 oopmap->set_bit(idx);
622 }
623
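// Patch the header of the buffered copy of requested_obj: install the requested-space
// narrow klass, carry over the source object's identity hash when one is present, and
// clear the age bits.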
624 void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
625 assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
626 narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
627 address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
628
629 oop fake_oop = cast_to_oop(buffered_addr);
630 if (UseCompactObjectHeaders) {
631 fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
632 } else {
633 fake_oop->set_narrow_klass(nk);
634 }
635
636 if (src_obj == nullptr) {
637 return;
638 }
639 // We need to retain the identity_hash, because it may have been used by some hashtables
640 // in the shared heap.
641 if (!src_obj->fast_no_hash_check() && (!(EnableValhalla && src_obj->mark().is_inline_type()))) {
642 intptr_t src_hash = src_obj->identity_hash();
643 if (UseCompactObjectHeaders) {
644 fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
645 } else if (EnableValhalla) {
646 fake_oop->set_mark(src_klass->prototype_header().copy_set_hash(src_hash));
647 } else {
648 fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
649 }
650 assert(fake_oop->mark().is_unlocked(), "sanity");
651
652 DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
653 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
654 }
655 // Strip age bits.
656 fake_oop->set_mark(fake_oop->mark().set_age(0));
657 }
658
659 class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
660 oop _src_obj;
661 address _buffered_obj;
662 CHeapBitMap* _oopmap;
663 bool _is_java_lang_ref;
664 public:
665 EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
666 _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)