26 #include "cds/aotMappedHeapWriter.hpp"
27 #include "cds/aotReferenceObjSupport.hpp"
28 #include "cds/cdsConfig.hpp"
29 #include "cds/filemap.hpp"
30 #include "cds/heapShared.inline.hpp"
31 #include "cds/regeneratedClasses.hpp"
32 #include "classfile/javaClasses.hpp"
33 #include "classfile/modules.hpp"
34 #include "classfile/systemDictionary.hpp"
35 #include "gc/shared/collectedHeap.hpp"
36 #include "memory/allocation.inline.hpp"
37 #include "memory/iterator.inline.hpp"
38 #include "memory/oopFactory.hpp"
39 #include "memory/universe.hpp"
40 #include "oops/compressedOops.hpp"
41 #include "oops/objArrayOop.inline.hpp"
42 #include "oops/oop.inline.hpp"
43 #include "oops/oopHandle.inline.hpp"
44 #include "oops/typeArrayKlass.hpp"
45 #include "oops/typeArrayOop.hpp"
46 #include "runtime/java.hpp"
47 #include "runtime/mutexLocker.hpp"
48 #include "utilities/bitMap.inline.hpp"
49 #if INCLUDE_G1GC
50 #include "gc/g1/g1CollectedHeap.hpp"
51 #include "gc/g1/g1HeapRegion.hpp"
52 #endif
53
54 #if INCLUDE_CDS_JAVA_HEAP
55
// Dump-time byte buffer into which the archived heap image is assembled.
GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;

// Presumably gates layout decisions that must be reproducible across dumps — TODO confirm at use sites.
bool AOTMappedHeapWriter::_is_writing_deterministic_heap = false;
// Number of bytes of _buffer currently occupied by copied objects.
size_t AOTMappedHeapWriter::_buffer_used;

// Heap root segments
HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;

// Requested (runtime) address range at which the archived heap should be mapped.
address AOTMappedHeapWriter::_requested_bottom;
address AOTMappedHeapWriter::_requested_top;
257 address AOTMappedHeapWriter::requested_address() {
258 assert(_buffer != nullptr, "must be initialized");
259 return _requested_bottom;
260 }
261
262 void AOTMappedHeapWriter::allocate_buffer() {
263 int initial_buffer_size = 100000;
264 _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
265 _buffer_used = 0;
266 ensure_buffer_space(1); // so that buffer_bottom() works
267 }
268
269 void AOTMappedHeapWriter::ensure_buffer_space(size_t min_bytes) {
270 // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
271 guarantee(min_bytes <= max_jint, "we dont support archiving more than 2G of objects");
272 _buffer->at_grow(to_array_index(min_bytes));
273 }
274
// Carves one root-segment objArray out of the dump-time buffer at the given
// buffer offset and initializes its header. Elements are stored later via
// root_segment_at_put().
objArrayOop AOTMappedHeapWriter::allocate_root_segment(size_t offset, int element_count) {
  HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
  // NOTE(review): objArrayOopDesc::object_size() returns a size in HeapWords while
  // memset() takes bytes, so this zeroes only part of the segment. Presumably benign
  // because the growable buffer zero-fills on growth — confirm.
  memset(mem, 0, objArrayOopDesc::object_size(element_count));

  // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
  if (UseCompactObjectHeaders) {
    // Compact headers: klass is encoded inside the prototype mark word.
    oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
  }
  arrayOopDesc::set_length(mem, element_count);
  return objArrayOop(cast_to_oop(mem));
}
289
290 void AOTMappedHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
291 // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
292 if (UseCompressedOops) {
293 *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
294 } else {
295 *segment->obj_at_addr<oop>(index) = root;
296 }
297 }
298
299 void AOTMappedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
300 // Depending on the number of classes we are archiving, a single roots array may be
301 // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
302 // allows us to chop the large array into a series of "segments". Current layout
303 // starts with zero or more segments exactly fitting MIN_GC_REGION_ALIGNMENT, and end
304 // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
305 // This is simple and efficient. We do not need filler objects anywhere between the segments,
306 // or immediately after the last segment. This allows starting the object dump immediately
307 // after the roots.
308
309 assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
310 "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);
311
312 int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
313 assert(objArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
314 "Should match exactly");
315
316 HeapRootSegments segments(_buffer_used,
317 roots->length(),
318 MIN_GC_REGION_ALIGNMENT,
319 max_elem_count);
320
321 int root_index = 0;
322 for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
323 int size_elems = segments.size_in_elems(seg_idx);
324 size_t size_bytes = segments.size_in_bytes(seg_idx);
325
326 size_t oop_offset = _buffer_used;
327 _buffer_used = oop_offset + size_bytes;
328 ensure_buffer_space(_buffer_used);
329
330 assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
331 "Roots segment %zu start is not aligned: %zu",
332 segments.count(), oop_offset);
333
412 info->set_buffer_offset(buffer_offset);
413 assert(buffer_offset <= 0x7fffffff, "sanity");
414
415 OopHandle handle(Universe::vm_global(), src_obj);
416 _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
417 _buffer_offset_to_source_obj_table->maybe_grow();
418
419 if (java_lang_Module::is_instance(src_obj)) {
420 Modules::check_archived_module_oop(src_obj);
421 }
422 }
423
424 log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
425 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
426 log_info(aot)(" strings = %8zu (%zu bytes)", _num_strings, _string_bytes);
427 log_info(aot)(" packages = %8zu", _num_packages);
428 log_info(aot)(" protection domains = %8zu", _num_protection_domains);
429 }
430
431 size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
432 size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
433 return byte_size;
434 }
435
436 int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) {
437 assert(is_object_aligned(fill_bytes), "must be");
438 size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
439
440 int initial_length = to_array_length(fill_bytes / elemSize);
441 for (int length = initial_length; length >= 0; length --) {
442 size_t array_byte_size = filler_array_byte_size(length);
443 if (array_byte_size == fill_bytes) {
444 return length;
445 }
446 }
447
448 ShouldNotReachHere();
449 return -1;
450 }
451
// Writes a filler objArray of fill_bytes at the current buffer top so that the
// gap between GC regions remains parseable as a valid object.
HeapWord* AOTMappedHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  // Store the *requested* (runtime) narrow-klass encoding: this header is only
  // read after the archive is mapped at the requested address.
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    // Compact headers: the narrow klass lives inside the mark word.
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
  return mem;
}
466
467 void AOTMappedHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
468 // We fill only with arrays (so we don't need to use a single HeapWord filler if the
469 // leftover space is smaller than a zero-sized array object). Therefore, we need to
470 // make sure there's enough space of min_filler_byte_size in the current region after
471 // required_byte_size has been allocated. If not, fill the remainder of the current
472 // region.
473 size_t min_filler_byte_size = filler_array_byte_size(0);
474 size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;
475
476 const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
477 const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
478
479 if (cur_min_region_bottom != next_min_region_bottom) {
711 address requested_region_bottom;
712
713 assert(request_p >= (T*)_requested_bottom, "sanity");
714 assert(request_p < (T*)_requested_top, "sanity");
715 requested_region_bottom = _requested_bottom;
716
717 // Mark the pointer in the oopmap
718 T* region_bottom = (T*)requested_region_bottom;
719 assert(request_p >= region_bottom, "must be");
720 BitMap::idx_t idx = request_p - region_bottom;
721 assert(idx < oopmap->size(), "overflow");
722 oopmap->set_bit(idx);
723 }
724
// Rewrites the header of the buffered copy of requested_obj so that it is valid
// in the requested (runtime) address space: requested narrow-klass encoding,
// retained identity hash (if any), and age bits cleared. src_obj may be nullptr
// for synthesized objects (e.g. fillers), in which case only the klass is set.
void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  // fake_oop points into the dump-time buffer, not the real Java heap.
  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
  } else {
    fake_oop->set_narrow_klass(nk);
  }

  if (src_obj == nullptr) {
    return;
  }
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!src_obj->fast_no_hash_check()) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      // Rebuild the full mark (klass + hash) in one store.
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
  // Strip age bits.
  fake_oop->set_mark(fake_oop->mark().set_age(0));
}
756
757 class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
758 oop _src_obj;
759 address _buffered_obj;
760 CHeapBitMap* _oopmap;
761 bool _is_java_lang_ref;
762 public:
763 EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
764 _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
|
26 #include "cds/aotMappedHeapWriter.hpp"
27 #include "cds/aotReferenceObjSupport.hpp"
28 #include "cds/cdsConfig.hpp"
29 #include "cds/filemap.hpp"
30 #include "cds/heapShared.inline.hpp"
31 #include "cds/regeneratedClasses.hpp"
32 #include "classfile/javaClasses.hpp"
33 #include "classfile/modules.hpp"
34 #include "classfile/systemDictionary.hpp"
35 #include "gc/shared/collectedHeap.hpp"
36 #include "memory/allocation.inline.hpp"
37 #include "memory/iterator.inline.hpp"
38 #include "memory/oopFactory.hpp"
39 #include "memory/universe.hpp"
40 #include "oops/compressedOops.hpp"
41 #include "oops/objArrayOop.inline.hpp"
42 #include "oops/oop.inline.hpp"
43 #include "oops/oopHandle.inline.hpp"
44 #include "oops/typeArrayKlass.hpp"
45 #include "oops/typeArrayOop.hpp"
46 #include "runtime/arguments.hpp"
47 #include "runtime/java.hpp"
48 #include "runtime/mutexLocker.hpp"
49 #include "utilities/bitMap.inline.hpp"
50 #if INCLUDE_G1GC
51 #include "gc/g1/g1CollectedHeap.hpp"
52 #include "gc/g1/g1HeapRegion.hpp"
53 #endif
54
55 #if INCLUDE_CDS_JAVA_HEAP
56
// Dump-time byte buffer into which the archived heap image is assembled.
GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;

// Presumably gates layout decisions that must be reproducible across dumps — TODO confirm at use sites.
bool AOTMappedHeapWriter::_is_writing_deterministic_heap = false;
// Number of bytes of _buffer currently occupied by copied objects.
size_t AOTMappedHeapWriter::_buffer_used;

// Heap root segments
HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;

// Requested (runtime) address range at which the archived heap should be mapped.
address AOTMappedHeapWriter::_requested_bottom;
address AOTMappedHeapWriter::_requested_top;
258 address AOTMappedHeapWriter::requested_address() {
259 assert(_buffer != nullptr, "must be initialized");
260 return _requested_bottom;
261 }
262
263 void AOTMappedHeapWriter::allocate_buffer() {
264 int initial_buffer_size = 100000;
265 _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
266 _buffer_used = 0;
267 ensure_buffer_space(1); // so that buffer_bottom() works
268 }
269
270 void AOTMappedHeapWriter::ensure_buffer_space(size_t min_bytes) {
271 // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
272 guarantee(min_bytes <= max_jint, "we dont support archiving more than 2G of objects");
273 _buffer->at_grow(to_array_index(min_bytes));
274 }
275
// Carves one root-segment array out of the dump-time buffer at the given buffer
// offset and initializes its header. Elements are stored later via
// root_segment_at_put().
objArrayOop AOTMappedHeapWriter::allocate_root_segment(size_t offset, int element_count) {
  HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
  // NOTE(review): refArrayOopDesc::object_size() returns a size in HeapWords while
  // memset() takes bytes, so this zeroes only part of the segment. Presumably benign
  // because the growable buffer zero-fills on growth — confirm.
  memset(mem, 0, refArrayOopDesc::object_size(element_count));

  // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
  if (UseCompactObjectHeaders) {
    // Compact headers: klass is encoded inside the prototype mark word.
    oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
  } else {
    // With Valhalla enabled, some klasses carry non-default prototype headers;
    // object arrays are expected to still match the plain prototype here.
    assert(!Arguments::is_valhalla_enabled() || Universe::objectArrayKlass()->prototype_header() == markWord::prototype(), "should be the same");
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
  }
  arrayOopDesc::set_length(mem, element_count);
  return objArrayOop(cast_to_oop(mem));
}
291
292 void AOTMappedHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
293 // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
294 if (UseCompressedOops) {
295 *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
296 } else {
297 *segment->obj_at_addr<oop>(index) = root;
298 }
299 }
300
301 void AOTMappedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
302 // Depending on the number of classes we are archiving, a single roots array may be
303 // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
304 // allows us to chop the large array into a series of "segments". Current layout
305 // starts with zero or more segments exactly fitting MIN_GC_REGION_ALIGNMENT, and end
306 // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
307 // This is simple and efficient. We do not need filler objects anywhere between the segments,
308 // or immediately after the last segment. This allows starting the object dump immediately
309 // after the roots.
310
311 assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
312 "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);
313
314 int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
315 assert(refArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
316 "Should match exactly");
317
318 HeapRootSegments segments(_buffer_used,
319 roots->length(),
320 MIN_GC_REGION_ALIGNMENT,
321 max_elem_count);
322
323 int root_index = 0;
324 for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
325 int size_elems = segments.size_in_elems(seg_idx);
326 size_t size_bytes = segments.size_in_bytes(seg_idx);
327
328 size_t oop_offset = _buffer_used;
329 _buffer_used = oop_offset + size_bytes;
330 ensure_buffer_space(_buffer_used);
331
332 assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
333 "Roots segment %zu start is not aligned: %zu",
334 segments.count(), oop_offset);
335
414 info->set_buffer_offset(buffer_offset);
415 assert(buffer_offset <= 0x7fffffff, "sanity");
416
417 OopHandle handle(Universe::vm_global(), src_obj);
418 _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
419 _buffer_offset_to_source_obj_table->maybe_grow();
420
421 if (java_lang_Module::is_instance(src_obj)) {
422 Modules::check_archived_module_oop(src_obj);
423 }
424 }
425
426 log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
427 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
428 log_info(aot)(" strings = %8zu (%zu bytes)", _num_strings, _string_bytes);
429 log_info(aot)(" packages = %8zu", _num_packages);
430 log_info(aot)(" protection domains = %8zu", _num_protection_domains);
431 }
432
433 size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
434 size_t byte_size = refArrayOopDesc::object_size(length) * HeapWordSize;
435 return byte_size;
436 }
437
438 int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) {
439 assert(is_object_aligned(fill_bytes), "must be");
440 size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
441
442 int initial_length = to_array_length(fill_bytes / elemSize);
443 for (int length = initial_length; length >= 0; length --) {
444 size_t array_byte_size = filler_array_byte_size(length);
445 if (array_byte_size == fill_bytes) {
446 return length;
447 }
448 }
449
450 ShouldNotReachHere();
451 return -1;
452 }
453
// Writes a filler objArray of fill_bytes at the current buffer top so that the
// gap between GC regions remains parseable as a valid object.
HeapWord* AOTMappedHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  // Store the *requested* (runtime) narrow-klass encoding: this header is only
  // read after the archive is mapped at the requested address.
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    // Compact headers: the narrow klass lives inside the mark word.
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    // With Valhalla enabled, object arrays are expected to still match the
    // plain prototype mark word here.
    assert(!Arguments::is_valhalla_enabled() || Universe::objectArrayKlass()->prototype_header() == markWord::prototype(), "should be the same");
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
  return mem;
}
469
470 void AOTMappedHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
471 // We fill only with arrays (so we don't need to use a single HeapWord filler if the
472 // leftover space is smaller than a zero-sized array object). Therefore, we need to
473 // make sure there's enough space of min_filler_byte_size in the current region after
474 // required_byte_size has been allocated. If not, fill the remainder of the current
475 // region.
476 size_t min_filler_byte_size = filler_array_byte_size(0);
477 size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;
478
479 const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
480 const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
481
482 if (cur_min_region_bottom != next_min_region_bottom) {
714 address requested_region_bottom;
715
716 assert(request_p >= (T*)_requested_bottom, "sanity");
717 assert(request_p < (T*)_requested_top, "sanity");
718 requested_region_bottom = _requested_bottom;
719
720 // Mark the pointer in the oopmap
721 T* region_bottom = (T*)requested_region_bottom;
722 assert(request_p >= region_bottom, "must be");
723 BitMap::idx_t idx = request_p - region_bottom;
724 assert(idx < oopmap->size(), "overflow");
725 oopmap->set_bit(idx);
726 }
727
// Rewrites the header of the buffered copy of requested_obj so that it is valid
// in the requested (runtime) address space: requested narrow-klass encoding,
// retained identity hash (if any), and age bits cleared. This variant is
// Valhalla-aware: the mark prototype comes from src_klass, and objects whose
// mark says inline-type are excluded from hash retention. src_obj may be
// nullptr for synthesized objects (e.g. fillers).
void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  // fake_oop points into the dump-time buffer, not the real Java heap.
  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    // Use the klass's own prototype so any klass-specific mark bits are kept.
    markWord prototype_header = src_klass->prototype_header().set_narrow_klass(nk);
    fake_oop->set_mark(prototype_header);
  } else {
    fake_oop->set_narrow_klass(nk);
  }

  if (src_obj == nullptr) {
    return;
  }
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  // Inline-type instances (Valhalla) are skipped: no identity hash is retained for them.
  if (!src_obj->fast_no_hash_check() && (!(Arguments::is_valhalla_enabled() && src_obj->mark().is_inline_type()))) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      // The klass-derived mark is already installed; just fold in the hash.
      fake_oop->set_mark(fake_oop->mark().copy_set_hash(src_hash));
    } else if (Arguments::is_valhalla_enabled()) {
      // Valhalla: base the mark on the klass prototype, not the generic one.
      fake_oop->set_mark(src_klass->prototype_header().copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
  // Strip age bits.
  fake_oop->set_mark(fake_oop->mark().set_age(0));
}
762
763 class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
764 oop _src_obj;
765 address _buffered_obj;
766 CHeapBitMap* _oopmap;
767 bool _is_java_lang_ref;
768 public:
769 EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
770 _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
|