#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t AOTMappedHeapWriter::_buffer_used;

// Heap root segments
HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;

address AOTMappedHeapWriter::_requested_bottom;
address AOTMappedHeapWriter::_requested_top;

static size_t _num_strings = 0;
static size_t _string_bytes = 0;
static size_t _num_packages = 0;
static size_t _num_protection_domains = 0;

GrowableArrayCHeap<AOTMappedHeapWriter::NativePointerInfo, mtClassShared>* AOTMappedHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* AOTMappedHeapWriter::_source_objs;
GrowableArrayCHeap<AOTMappedHeapWriter::HeapObjOrder, mtClassShared>* AOTMappedHeapWriter::_source_objs_order;

AOTMappedHeapWriter::BufferOffsetToSourceObjectTable*
  AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;

DumpedInternedStrings* AOTMappedHeapWriter::_dumped_interned_strings = nullptr;

typedef HashTable<
  size_t,    // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
  size_t,    // size of this filler (in bytes)
  127,       // prime number
  AnyObj::C_HEAP,
  mtClassShared> FillersTable;
static FillersTable* _fillers;
static int _num_native_ptrs = 0;

void AOTMappedHeapWriter::init() {
  if (CDSConfig::is_dumping_heap()) {
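    // Run a full GC first, presumably so that dead objects are reclaimed before we
    // start gathering archive candidates from the dump-time heap.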
    Universe::heap()->collect(GCCause::_java_lang_system_gc);

    _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
    _dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
    _fillers = new FillersTable();
    _requested_bottom = nullptr;
    _requested_top = nullptr;

    _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);

// ...

    _source_objs_order->append(os);
  }
  log_info(aot)("computed ranks");
  _source_objs_order->sort(compare_objs_by_oop_fields);
  log_info(aot)("sorting heap objects done");
}

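// Copy the heap root segments, followed by every gathered source object, into the output
// buffer. Each object's offset from buffer_bottom() is recorded in its CachedOopInfo and in
// _buffer_offset_to_source_obj_table so the buffered copy can be found again later.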
void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // There could be multiple root segments, and we want them to be region-aligned.
  // Placing them ahead of the other objects ensures we waste no space.
  copy_roots_to_buffer(roots);

  sort_source_objs();
  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);
    assert(buffer_offset <= 0x7fffffff, "sanity");
    HeapShared::add_to_permanent_oop_table(src_obj, (int)buffer_offset);

    OopHandle handle(Universe::vm_global(), src_obj);
    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
    _buffer_offset_to_source_obj_table->maybe_grow();

    if (java_lang_Module::is_instance(src_obj)) {
      Modules::check_archived_module_oop(src_obj);
    }
  }

  log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
                _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
  log_info(cds)("    strings = %8zu (%zu bytes)", _num_strings, _string_bytes);
  log_info(cds)("    packages = %8zu", _num_packages);
  log_info(cds)("    protection domains = %8zu", _num_protection_domains);
}

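// Fillers are dummy object arrays inserted to pad the buffer so that no archived object
// crosses a MIN_GC_REGION_ALIGNMENT boundary (see maybe_fill_gc_region_gap(), called from
// copy_one_source_obj_to_buffer()). The helpers below compute filler array sizes; the
// _fillers table records where each filler was placed.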
size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}

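// Search downward from an upper-bound array length until we find a length whose objArrayOop
// occupies exactly fill_bytes. As an illustration (assuming a 16-byte array header and
// 4-byte narrowOop elements; the exact numbers are platform-dependent), a 64-byte gap would
// be filled by a length-12 array: 16 + 12 * 4 = 64.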
int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length --) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
// ...
    _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
  }
}

size_t AOTMappedHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

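// Write a value of type T into a field of the *buffered* copy of an object (as opposed to
// the source object in the live heap). A hypothetical use, with an illustrative offset and
// value:
//
//   update_buffered_object_field<int>(buffered_addr, hash_field_offset, computed_hash);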
template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

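// Tally statistics for the summary logged by copy_source_objs_to_buffer(): archived strings
// (including the bytes of their backing value arrays), packages, and protection domains.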
void AOTMappedHeapWriter::update_stats(oop src_obj) {
  if (java_lang_String::is_instance(src_obj)) {
    _num_strings ++;
    _string_bytes += src_obj->size() * HeapWordSize;
    _string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
  } else {
    Klass* k = src_obj->klass();
    Symbol* name = k->name();
    if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
      _num_packages ++;
    } else if (name->equals("java/security/ProtectionDomain")) {
      _num_protection_domains ++;
    }
  }
}

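// Copy a single source object into the output buffer and return its offset from
// buffer_bottom(). A filler may be inserted first so that the copy does not straddle a
// MIN_GC_REGION_ALIGNMENT boundary.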
size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  update_stats(src_obj);

  assert(!is_too_large_to_archive(src_obj), "already checked");
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that we don't have an object that can
  // possibly span two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
// ...
    // Mark this field in the bitmap
    BitMap::idx_t idx = requested_field_addr - bottom;
    heap_info->ptrmap()->set_bit(idx);
    num_non_null_ptrs ++;
    max_idx = MAX2(max_idx, idx);

    // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
    // this address if the RO/RW regions are mapped at the default location).

    Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
    Metadata* native_ptr = *buffered_field_addr;
    guarantee(native_ptr != nullptr, "sanity");

    if (RegeneratedClasses::has_been_regenerated(native_ptr)) {
      native_ptr = RegeneratedClasses::get_regenerated_object(native_ptr);
    }

    guarantee(ArchiveBuilder::current()->has_been_archived((address)native_ptr),
              "Metadata %p should have been archived", native_ptr);

    address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
    address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
    *buffered_field_addr = (Metadata*)requested_native_ptr;
  }

  heap_info->ptrmap()->resize(max_idx + 1);
  log_info(aot, heap)("calculate_ptrmap: marked %d non-null native pointers for heap region (%zu bits)",
                      num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}

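// Create an iterator over the objects in the output buffer for AOT map logging, tracking
// both their buffered addresses and the corresponding requested (runtime) addresses.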
AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHeapInfo* heap_info) {
  class MappedWriterOopIterator : public AOTMapLogger::OopDataIterator {
  private:
    address _current;
    address _next;

    address _buffer_start;
    address _buffer_end;
    uint64_t _buffer_start_narrow_oop;
    intptr_t _buffer_to_requested_delta;