26 #include "cds/aotMappedHeapWriter.hpp"
27 #include "cds/aotReferenceObjSupport.hpp"
28 #include "cds/cdsConfig.hpp"
29 #include "cds/filemap.hpp"
30 #include "cds/heapShared.inline.hpp"
31 #include "cds/regeneratedClasses.hpp"
32 #include "classfile/javaClasses.hpp"
33 #include "classfile/modules.hpp"
34 #include "classfile/systemDictionary.hpp"
35 #include "gc/shared/collectedHeap.hpp"
36 #include "memory/allocation.inline.hpp"
37 #include "memory/iterator.inline.hpp"
38 #include "memory/oopFactory.hpp"
39 #include "memory/universe.hpp"
40 #include "oops/compressedOops.hpp"
41 #include "oops/objArrayOop.inline.hpp"
42 #include "oops/oop.inline.hpp"
43 #include "oops/oopHandle.inline.hpp"
44 #include "oops/typeArrayKlass.hpp"
45 #include "oops/typeArrayOop.hpp"
46 #include "runtime/java.hpp"
47 #include "runtime/mutexLocker.hpp"
48 #include "utilities/bitMap.inline.hpp"
49 #if INCLUDE_G1GC
50 #include "gc/g1/g1CollectedHeap.hpp"
51 #include "gc/g1/g1HeapRegion.hpp"
52 #endif
53
54 #if INCLUDE_CDS_JAVA_HEAP
55
56 GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;
57
58 bool AOTMappedHeapWriter::_is_writing_deterministic_heap = false;
59 size_t AOTMappedHeapWriter::_buffer_used;
60
61 // Heap root segments
62 HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;
63
64 address AOTMappedHeapWriter::_requested_bottom;
65 address AOTMappedHeapWriter::_requested_top;
277 address AOTMappedHeapWriter::requested_address() {
278 assert(_buffer != nullptr, "must be initialized");
279 return _requested_bottom;
280 }
281
282 void AOTMappedHeapWriter::allocate_buffer() {
283 int initial_buffer_size = 100000;
284 _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
285 _buffer_used = 0;
286 ensure_buffer_space(1); // so that buffer_bottom() works
287 }
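// The archived heap image is staged in this C-heap byte buffer rather than in the Java
// heap; offsets into _buffer stand in for the eventual heap addresses (see
// offset_to_buffered_address()), and _buffer_used tracks how many bytes of the image
// have been laid out so far.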
288
289 void AOTMappedHeapWriter::ensure_buffer_space(size_t min_bytes) {
290 // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
291 guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
292 _buffer->at_grow(to_array_index(min_bytes));
293 }
294
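// Materialize an objArray header directly in the buffer. Because the segment lives
// outside the real Java heap, the mark word, klass and length are written by hand here
// instead of going through the normal allocation path.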
295 objArrayOop AOTMappedHeapWriter::allocate_root_segment(size_t offset, int element_count) {
296 HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
297 memset(mem, 0, objArrayOopDesc::object_size(element_count));
298
299 // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
300 if (UseCompactObjectHeaders) {
301 oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
302 } else {
303 oopDesc::set_mark(mem, markWord::prototype());
304 oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
305 }
306 arrayOopDesc::set_length(mem, element_count);
307 return objArrayOop(cast_to_oop(mem));
308 }
309
310 void AOTMappedHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
311 // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
312 if (UseCompressedOops) {
313 *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
314 } else {
315 *segment->obj_at_addr<oop>(index) = root;
316 }
317 }
318
319 void AOTMappedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
320 // Depending on the number of classes we are archiving, a single roots array may be
321 // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
322 // allows us to chop the large array into a series of "segments". Current layout
323 // starts with zero or more segments exactly fitting MIN_GC_REGION_ALIGNMENT, and ends
324 // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
325 // This is simple and efficient. We do not need filler objects anywhere between the segments,
326 // or immediately after the last segment. This allows starting the object dump immediately
327 // after the roots.
328
329 assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
330 "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);
331
332 int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
333 assert(objArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
334 "Should match exactly");
335
336 HeapRootSegments segments(_buffer_used,
337 roots->length(),
338 MIN_GC_REGION_ALIGNMENT,
339 max_elem_count);
340
341 int root_index = 0;
342 for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
343 int size_elems = segments.size_in_elems(seg_idx);
344 size_t size_bytes = segments.size_in_bytes(seg_idx);
345
346 size_t oop_offset = _buffer_used;
347 _buffer_used = oop_offset + size_bytes;
348 ensure_buffer_space(_buffer_used);
349
350 assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
351 "Roots segment %zu start is not aligned: %zu",
352 seg_idx, oop_offset);
353
428 oop src_obj = _source_objs->at(src_obj_index);
429 HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
430 assert(info != nullptr, "must be");
431 size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
432 info->set_buffer_offset(buffer_offset);
433
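// Remember which source object ended up at this buffer offset. The OopHandle keeps the
// source object alive; later passes presumably use this table to map a buffered
// location back to the original heap object.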
434 OopHandle handle(Universe::vm_global(), src_obj);
435 _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
436 _buffer_offset_to_source_obj_table->maybe_grow();
437
438 if (java_lang_Module::is_instance(src_obj)) {
439 Modules::check_archived_module_oop(src_obj);
440 }
441 }
442
443 log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
444 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
445 }
446
447 size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
448 size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
449 return byte_size;
450 }
451
452 int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) {
453 assert(is_object_aligned(fill_bytes), "must be");
454 size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
455
456 int initial_length = to_array_length(fill_bytes / elemSize);
457 for (int length = initial_length; length >= 0; length --) {
458 size_t array_byte_size = filler_array_byte_size(length);
459 if (array_byte_size == fill_bytes) {
460 return length;
461 }
462 }
463
464 ShouldNotReachHere();
465 return -1;
466 }
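// The initial guess fill_bytes / elemSize ignores the array header, so it can only
// overestimate the length; walking downward finds the length whose total object size
// (header + elements, padded to heap words) matches fill_bytes exactly.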
467
468 HeapWord* AOTMappedHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
469 assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
470 Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
471 HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
472 memset(mem, 0, fill_bytes);
473 narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
474 if (UseCompactObjectHeaders) {
475 oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
476 } else {
477 oopDesc::set_mark(mem, markWord::prototype());
478 cast_to_oop(mem)->set_narrow_klass(nk);
479 }
480 arrayOopDesc::set_length(mem, array_length);
481 return mem;
482 }
483
484 void AOTMappedHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
485 // We fill only with arrays (so we don't need to use a single HeapWord filler if the
486 // leftover space is smaller than a zero-sized array object). Therefore, we need to
487 // make sure there is at least min_filler_byte_size of space left in the current region
488 // after required_byte_size has been allocated. If not, fill the remainder of the current
489 // region.
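// For example (assuming MIN_GC_REGION_ALIGNMENT is 1*M): if _buffer_used is 32 bytes
// short of a region boundary and required_byte_size is 64, the new object would straddle
// the boundary, so the remaining 32 bytes are covered by a filler array and the object
// starts at the next region instead.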
490 size_t min_filler_byte_size = filler_array_byte_size(0);
491 size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;
492
493 const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
494 const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
495
496 if (cur_min_region_bottom != next_min_region_bottom) {
711
712 assert(request_p >= (T*)_requested_bottom, "sanity");
713 assert(request_p < (T*)_requested_top, "sanity");
714 requested_region_bottom = _requested_bottom;
715
716 // Mark the pointer in the oopmap
717 T* region_bottom = (T*)requested_region_bottom;
718 assert(request_p >= region_bottom, "must be");
719 BitMap::idx_t idx = request_p - region_bottom;
720 assert(idx < oopmap->size(), "overflow");
721 oopmap->set_bit(idx);
722 }
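// The bit index recorded above is the slot distance (in narrowOop or oop units) of the
// embedded pointer from the requested region bottom, so the runtime can later find and
// relocate every reference in the mapped region without knowing dump-time buffer addresses.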
723
724 void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
725 assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
726 narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
727 address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
728
729 oop fake_oop = cast_to_oop(buffered_addr);
730 if (UseCompactObjectHeaders) {
731 fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
732 } else {
733 fake_oop->set_narrow_klass(nk);
734 }
735
736 if (src_obj == nullptr) {
737 return;
738 }
739 // We need to retain the identity_hash, because it may have been used by some hashtables
740 // in the shared heap.
741 if (!src_obj->fast_no_hash_check()) {
742 intptr_t src_hash = src_obj->identity_hash();
743 if (UseCompactObjectHeaders) {
744 fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
745 } else {
746 fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
747 }
748 assert(fake_oop->mark().is_unlocked(), "sanity");
749
750 DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
751 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
752 }
753 // Strip age bits.
754 fake_oop->set_mark(fake_oop->mark().set_age(0));
755 }
756
757 class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
758 oop _src_obj;
759 address _buffered_obj;
760 CHeapBitMap* _oopmap;
761 bool _is_java_lang_ref;
762 public:
763 EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
764 _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
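// ------------------------------------------------------------------------------------
// The listing below appears to be the Valhalla-modified variant of the same file: it
// adds runtime/arguments.hpp, sizes root segments and fillers with refArrayOopDesc, and
// special-cases inline types when preserving identity hashes (see the
// Arguments::is_valhalla_enabled() checks below).
// ------------------------------------------------------------------------------------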
26 #include "cds/aotMappedHeapWriter.hpp"
27 #include "cds/aotReferenceObjSupport.hpp"
28 #include "cds/cdsConfig.hpp"
29 #include "cds/filemap.hpp"
30 #include "cds/heapShared.inline.hpp"
31 #include "cds/regeneratedClasses.hpp"
32 #include "classfile/javaClasses.hpp"
33 #include "classfile/modules.hpp"
34 #include "classfile/systemDictionary.hpp"
35 #include "gc/shared/collectedHeap.hpp"
36 #include "memory/allocation.inline.hpp"
37 #include "memory/iterator.inline.hpp"
38 #include "memory/oopFactory.hpp"
39 #include "memory/universe.hpp"
40 #include "oops/compressedOops.hpp"
41 #include "oops/objArrayOop.inline.hpp"
42 #include "oops/oop.inline.hpp"
43 #include "oops/oopHandle.inline.hpp"
44 #include "oops/typeArrayKlass.hpp"
45 #include "oops/typeArrayOop.hpp"
46 #include "runtime/arguments.hpp"
47 #include "runtime/java.hpp"
48 #include "runtime/mutexLocker.hpp"
49 #include "utilities/bitMap.inline.hpp"
50 #if INCLUDE_G1GC
51 #include "gc/g1/g1CollectedHeap.hpp"
52 #include "gc/g1/g1HeapRegion.hpp"
53 #endif
54
55 #if INCLUDE_CDS_JAVA_HEAP
56
57 GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;
58
59 bool AOTMappedHeapWriter::_is_writing_deterministic_heap = false;
60 size_t AOTMappedHeapWriter::_buffer_used;
61
62 // Heap root segments
63 HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;
64
65 address AOTMappedHeapWriter::_requested_bottom;
66 address AOTMappedHeapWriter::_requested_top;
278 address AOTMappedHeapWriter::requested_address() {
279 assert(_buffer != nullptr, "must be initialized");
280 return _requested_bottom;
281 }
282
283 void AOTMappedHeapWriter::allocate_buffer() {
284 int initial_buffer_size = 100000;
285 _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
286 _buffer_used = 0;
287 ensure_buffer_space(1); // so that buffer_bottom() works
288 }
289
290 void AOTMappedHeapWriter::ensure_buffer_space(size_t min_bytes) {
291 // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
292 guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
293 _buffer->at_grow(to_array_index(min_bytes));
294 }
295
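// In this variant the segment size is computed with refArrayOopDesc, presumably because
// under Valhalla reference arrays are a distinct layout from flat value-object arrays,
// and the extra assert below checks that, without compact headers, the object-array
// prototype header still equals the legacy markWord::prototype() before it is installed.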
296 objArrayOop AOTMappedHeapWriter::allocate_root_segment(size_t offset, int element_count) {
297 HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
298 memset(mem, 0, refArrayOopDesc::object_size(element_count));
299
300 // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
301 if (UseCompactObjectHeaders) {
302 oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
303 } else {
304 assert(!Arguments::is_valhalla_enabled() || Universe::objectArrayKlass()->prototype_header() == markWord::prototype(), "should be the same");
305 oopDesc::set_mark(mem, markWord::prototype());
306 oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
307 }
308 arrayOopDesc::set_length(mem, element_count);
309 return objArrayOop(cast_to_oop(mem));
310 }
311
312 void AOTMappedHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
313 // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
314 if (UseCompressedOops) {
315 *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
316 } else {
317 *segment->obj_at_addr<oop>(index) = root;
318 }
319 }
320
321 void AOTMappedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
322 // Depending on the number of classes we are archiving, a single roots array may be
323 // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
324 // allows us to chop the large array into a series of "segments". Current layout
325 // starts with zero or more segments exactly fitting MIN_GC_REGION_ALIGNMENT, and ends
326 // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
327 // This is simple and efficient. We do not need filler objects anywhere between the segments,
328 // or immediately after the last segment. This allows starting the object dump immediately
329 // after the roots.
330
331 assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
332 "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);
333
334 int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
335 assert(refArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
336 "Should match exactly");
337
338 HeapRootSegments segments(_buffer_used,
339 roots->length(),
340 MIN_GC_REGION_ALIGNMENT,
341 max_elem_count);
342
343 int root_index = 0;
344 for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
345 int size_elems = segments.size_in_elems(seg_idx);
346 size_t size_bytes = segments.size_in_bytes(seg_idx);
347
348 size_t oop_offset = _buffer_used;
349 _buffer_used = oop_offset + size_bytes;
350 ensure_buffer_space(_buffer_used);
351
352 assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
353 "Roots segment %zu start is not aligned: %zu",
354 seg_idx, oop_offset);
355
430 oop src_obj = _source_objs->at(src_obj_index);
431 HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
432 assert(info != nullptr, "must be");
433 size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
434 info->set_buffer_offset(buffer_offset);
435
436 OopHandle handle(Universe::vm_global(), src_obj);
437 _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
438 _buffer_offset_to_source_obj_table->maybe_grow();
439
440 if (java_lang_Module::is_instance(src_obj)) {
441 Modules::check_archived_module_oop(src_obj);
442 }
443 }
444
445 log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
446 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
447 }
448
449 size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
450 size_t byte_size = refArrayOopDesc::object_size(length) * HeapWordSize;
451 return byte_size;
452 }
453
454 int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) {
455 assert(is_object_aligned(fill_bytes), "must be");
456 size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
457
458 int initial_length = to_array_length(fill_bytes / elemSize);
459 for (int length = initial_length; length >= 0; length --) {
460 size_t array_byte_size = filler_array_byte_size(length);
461 if (array_byte_size == fill_bytes) {
462 return length;
463 }
464 }
465
466 ShouldNotReachHere();
467 return -1;
468 }
469
470 HeapWord* AOTMappedHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
471 assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
472 Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
473 HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
474 memset(mem, 0, fill_bytes);
475 narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
476 if (UseCompactObjectHeaders) {
477 oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
478 } else {
479 assert(!Arguments::is_valhalla_enabled() || Universe::objectArrayKlass()->prototype_header() == markWord::prototype(), "should be the same");
480 oopDesc::set_mark(mem, markWord::prototype());
481 cast_to_oop(mem)->set_narrow_klass(nk);
482 }
483 arrayOopDesc::set_length(mem, array_length);
484 return mem;
485 }
486
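// Filler arrays exist only to pad the buffer: init_filler_array_at_buffer_top() above
// writes a plain object-array header over the gap so that no archived object has to
// straddle a MIN_GC_REGION_ALIGNMENT boundary.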
487 void AOTMappedHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
488 // We fill only with arrays (so we don't need to use a single HeapWord filler if the
489 // leftover space is smaller than a zero-sized array object). Therefore, we need to
490 // make sure there is at least min_filler_byte_size of space left in the current region
491 // after required_byte_size has been allocated. If not, fill the remainder of the current
492 // region.
493 size_t min_filler_byte_size = filler_array_byte_size(0);
494 size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;
495
496 const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
497 const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
498
499 if (cur_min_region_bottom != next_min_region_bottom) {
714
715 assert(request_p >= (T*)_requested_bottom, "sanity");
716 assert(request_p < (T*)_requested_top, "sanity");
717 requested_region_bottom = _requested_bottom;
718
719 // Mark the pointer in the oopmap
720 T* region_bottom = (T*)requested_region_bottom;
721 assert(request_p >= region_bottom, "must be");
722 BitMap::idx_t idx = request_p - region_bottom;
723 assert(idx < oopmap->size(), "overflow");
724 oopmap->set_bit(idx);
725 }
726
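// Valhalla-specific differences below: with compact headers the mark is taken from
// src_klass->prototype_header() (which presumably may carry inline-type bits) rather
// than the plain prototype, the non-compact-header path likewise bases the hashed mark
// on the klass prototype, and identity hashes are not copied into the archived mark for
// inline-type objects.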
727 void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
728 assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
729 narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
730 address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
731
732 oop fake_oop = cast_to_oop(buffered_addr);
733 if (UseCompactObjectHeaders) {
734 markWord prototype_header = src_klass->prototype_header().set_narrow_klass(nk);
735 fake_oop->set_mark(prototype_header);
736 } else {
737 fake_oop->set_narrow_klass(nk);
738 }
739
740 if (src_obj == nullptr) {
741 return;
742 }
743 // We need to retain the identity_hash, because it may have been used by some hashtables
744 // in the shared heap.
745 if (!src_obj->fast_no_hash_check() && (!(Arguments::is_valhalla_enabled() && src_obj->mark().is_inline_type()))) {
746 intptr_t src_hash = src_obj->identity_hash();
747 if (UseCompactObjectHeaders) {
748 fake_oop->set_mark(fake_oop->mark().copy_set_hash(src_hash));
749 } else if (Arguments::is_valhalla_enabled()) {
750 fake_oop->set_mark(src_klass->prototype_header().copy_set_hash(src_hash));
751 } else {
752 fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
753 }
754 assert(fake_oop->mark().is_unlocked(), "sanity");
755
756 DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
757 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
758 }
759 // Strip age bits.
760 fake_oop->set_mark(fake_oop->mark().set_age(0));
761 }
762
763 class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
764 oop _src_obj;
765 address _buffered_obj;
766 CHeapBitMap* _oopmap;
767 bool _is_java_lang_ref;
768 public:
769 EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
770 _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)