address AOTMappedHeapWriter::requested_address() {
  assert(_buffer != nullptr, "must be initialized");
  return _requested_bottom;
}

void AOTMappedHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

void AOTMappedHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

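// A minimal sketch of how the buffer helpers above are meant to be used by the
// copying code later in this file (illustrative only; the exact call sites vary):
//
//   size_t offset = _buffer_used;               // claim the next free offset
//   _buffer_used  = offset + byte_size;         // reserve byte_size bytes
//   ensure_buffer_space(_buffer_used);          // grow the backing array if needed
//   address p = offset_to_buffered_address<address>(offset);
//   // ... write the object image through p ...
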
objArrayOop AOTMappedHeapWriter::allocate_root_segment(size_t offset, int element_count) {
  HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
  memset(mem, 0, refArrayOopDesc::object_size(element_count));

  // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
  } else {
    assert(!EnableValhalla || Universe::objectArrayKlass()->prototype_header() == markWord::prototype(), "should be the same");
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
  }
  arrayOopDesc::set_length(mem, element_count);
  return objArrayOop(cast_to_oop(mem));
}

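// For reference, the segment produced above has the same layout as a live object
// array in the Java heap (sketch of the non-compact-headers case, where the
// narrow klass follows the mark word; with UseCompactObjectHeaders the klass is
// encoded in the mark word itself):
//
//   [ markWord | narrowKlass | length | element 0 | element 1 | ... ]
//
// so once the archive is mapped at the requested address it can be used directly
// as an objArrayOop at runtime.
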
void AOTMappedHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
  // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
  if (UseCompressedOops) {
    *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
  } else {
    *segment->obj_at_addr<oop>(index) = root;
  }
}

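// The matching read side (illustrative; not part of this writer) would decode an
// element the same way, depending on UseCompressedOops:
//
//   oop root = UseCompressedOops
//       ? CompressedOops::decode(*segment->obj_at_addr<narrowOop>(index))
//       : *segment->obj_at_addr<oop>(index);
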
void AOTMappedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // Depending on the number of classes we are archiving, a single roots array may be
  // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
  // allows us to chop the large array into a series of "segments". The current layout
  // starts with zero or more segments that exactly fit MIN_GC_REGION_ALIGNMENT, and ends
  // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
  // This is simple and efficient. We do not need filler objects anywhere between the segments,
  // or immediately after the last segment. This allows the object dump to start immediately
  // after the roots.

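  // Worked example (numbers are illustrative, assuming compressed oops with a
  // 16-byte array header and MIN_GC_REGION_ALIGNMENT of 1 MB):
  //   max_elem_count = (1048576 - 16) / 4 = 262140 roots per full segment,
  //   so 500000 roots would be written as one full segment of 262140 elements
  //   followed by a final, smaller segment of 237860 elements.
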
  assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
         "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);

  int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
  assert(refArrayOopDesc::object_size(max_elem_count) * HeapWordSize == MIN_GC_REGION_ALIGNMENT,
         "Should match exactly");

  HeapRootSegments segments(_buffer_used,
                            roots->length(),
                            MIN_GC_REGION_ALIGNMENT,
                            max_elem_count);

  int root_index = 0;
  for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
    int size_elems = segments.size_in_elems(seg_idx);
    size_t size_bytes = segments.size_in_bytes(seg_idx);

    size_t oop_offset = _buffer_used;
    _buffer_used = oop_offset + size_bytes;
    ensure_buffer_space(_buffer_used);

    assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
           "Roots segment %zu start is not aligned: %zu",
           seg_idx, oop_offset);

    // ... (the remainder of this function, and the start of the later function
    //      that copies the source objects into the buffer, are elided here) ...
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);

    OopHandle handle(Universe::vm_global(), src_obj);
    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
    _buffer_offset_to_source_obj_table->maybe_grow();

    if (java_lang_Module::is_instance(src_obj)) {
      Modules::check_archived_module_oop(src_obj);
    }
  }

  log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
                _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
}
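
// Note: the loop above records, for every buffered object, an OopHandle back to
// the original heap object, keyed by the object's offset in the buffer
// (_buffer_offset_to_source_obj_table). Presumably this lets later passes over
// the buffer map a buffered copy back to its source object.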

size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = refArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}

int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

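// Worked example of the search above (numbers are illustrative, assuming
// compressed oops, a 16-byte array header, and 8-byte object alignment):
// to fill exactly 256 bytes, the loop starts at initial_length = 256 / 4 = 64
// and counts down; filler_array_byte_size(64) = 272, ...,
// filler_array_byte_size(61) = 264, and filler_array_byte_size(60) = 256,
// so the filler array gets length 60.
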
HeapWord* AOTMappedHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  // ... (the body of this function, and the functions that follow it, are elided
  //      here; the two lines below are the tail of a later, elided function) ...
  oopmap->set_bit(idx);
}

void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
  } else {
    fake_oop->set_narrow_klass(nk);
  }

  if (src_obj == nullptr) {
    return;
  }
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!src_obj->fast_no_hash_check() && !(EnableValhalla && src_obj->mark().is_inline_type())) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else if (EnableValhalla) {
      fake_oop->set_mark(src_klass->prototype_header().copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
  // Strip age bits.
  fake_oop->set_mark(fake_oop->mark().set_age(0));
}

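// Summary of the headers produced above (descriptive note): with
// UseCompactObjectHeaders the narrow klass, and the preserved identity hash if
// the source object had one, are encoded in the mark word itself; otherwise the
// narrow klass goes into the separate klass field and the mark word carries the
// prototype bits plus the preserved hash (under EnableValhalla, the klass's own
// prototype header is used instead of markWord::prototype()). In all cases the
// age bits are cleared at the end.
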
class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;
  bool _is_java_lang_ref;
public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
    _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)