#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t ArchiveHeapWriter::_buffer_used;

// Heap root segments
HeapRootSegments ArchiveHeapWriter::_heap_root_segments;

address ArchiveHeapWriter::_requested_bottom;
address ArchiveHeapWriter::_requested_top;

static size_t _num_strings = 0;
static size_t _string_bytes = 0;
static size_t _num_packages = 0;
static size_t _num_protection_domains = 0;

GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
GrowableArrayCHeap<ArchiveHeapWriter::HeapObjOrder, mtClassShared>* ArchiveHeapWriter::_source_objs_order;

ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
  ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;


typedef HashTable<
  size_t,    // offset of a filler from ArchiveHeapWriter::buffer_bottom()
  size_t,    // size of this filler (in bytes)
  127,       // prime number
  AnyObj::C_HEAP,
  mtClassShared> FillersTable;
static FillersTable* _fillers;
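// A minimal sketch of how _fillers is meant to be used (illustrative only):
// maybe_fill_gc_region_gap() records each filler it emits, and
// get_filler_size_at() answers point queries when the buffer is later walked
// one object at a time. The table is keyed by buffer offset rather than by
// address because the GrowableArray backing _buffer may be reallocated as it
// grows, which would invalidate raw buffered addresses.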
static int _num_native_ptrs = 0;

void ArchiveHeapWriter::init() {
  if (CDSConfig::is_dumping_heap()) {
    Universe::heap()->collect(GCCause::_java_lang_system_gc);
// ... (elided) ...

    _source_objs_order->append(os);
  }
  log_info(aot)("computed ranks");
  _source_objs_order->sort(compare_objs_by_oop_fields);
  log_info(aot)("sorting heap objects done");
}
359
void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // There may be multiple root segments, each of which must be region-aligned.
  // Placing them ahead of the objects ensures that no space is wasted.
  copy_roots_to_buffer(roots);

  sort_source_objs();
  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);
    assert(buffer_offset <= 0x7fffffff, "sanity");
    HeapShared::add_to_permanent_oop_table(src_obj, (int)buffer_offset);

    OopHandle handle(Universe::vm_global(), src_obj);
    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
    _buffer_offset_to_source_obj_table->maybe_grow();
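    // Each source object is copied to a distinct buffer offset, so the key
    // can never be present yet; hence put_when_absent() instead of put().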

    if (java_lang_Module::is_instance(src_obj)) {
      Modules::check_archived_module_oop(src_obj);
    }
  }

  log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
                _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
  log_info(aot)("  strings            = %8zu (%zu bytes)", _num_strings, _string_bytes);
  log_info(aot)("  packages           = %8zu", _num_packages);
  log_info(aot)("  protection domains = %8zu", _num_protection_domains);
}
391
size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}
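// Worked example (the header size is illustrative; the exact layout depends on
// UseCompressedOops and UseCompactObjectHeaders): with a 16-byte objArray
// header and 4-byte narrowOop elements, length 10 gives 16 + 10*4 = 56 bytes,
// which objArrayOopDesc::object_size() reports rounded up to whole heap words.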

int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
// ... (elided) ...

    _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
  }
}
461
size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}
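// A hedged sketch of the intended use (buffer_top() and the loop below are
// hypothetical; only get_filler_size_at() is defined in this file): a caller
// walking the buffer can distinguish fillers from real objects like this:
//
//   for (address p = buffer_bottom(); p < buffer_top(); ) {
//     size_t filler_bytes = get_filler_size_at(p);
//     p += (filler_bytes > 0) ? filler_bytes
//                             : cast_to_oop(p)->size() * HeapWordSize;
//   }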

template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}
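// Usage sketch (the field offset below is hypothetical): after a source object
// has been memcpy'ed into the buffer, individual fields of the buffered copy
// can be patched without touching the live source object, e.g.
//
//   update_buffered_object_field<int>(buffered_obj, some_int_field_offset, 0);
//
// Note that cast_to_oop() merely reinterprets the buffered bytes using the
// object layout; the buffered copy is not a live heap object at dump time.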
477
void ArchiveHeapWriter::update_stats(oop src_obj) {
  if (java_lang_String::is_instance(src_obj)) {
    _num_strings++;
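    // Count both the String object itself and its backing value array.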
    _string_bytes += src_obj->size() * HeapWordSize;
    _string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
  } else {
    Klass* k = src_obj->klass();
    Symbol* name = k->name();
    if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
      _num_packages++;
    } else if (name->equals("java/security/ProtectionDomain")) {
      _num_protection_domains++;
    }
  }
}
493
size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  update_stats(src_obj);

  assert(!is_too_large_to_archive(src_obj), "already checked");
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that no object could possibly span
  // two regions.
  maybe_fill_gc_region_gap(byte_size);
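  // Worked example (numbers are illustrative): with MIN_GC_REGION_ALIGNMENT =
  // 4096 and _buffer_used = 4064, a 64-byte object would occupy offsets
  // [4064, 4128) and straddle the region boundary at 4096. The call above
  // plugs the 32-byte gap with a filler array so that this object starts
  // exactly at offset 4096, keeping the assert below true.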

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
// ... (elided) ...

    // Mark this field in the bitmap
    BitMap::idx_t idx = requested_field_addr - bottom;
    heap_info->ptrmap()->set_bit(idx);
    num_non_null_ptrs++;
    max_idx = MAX2(max_idx, idx);

    // Set the native pointer to the requested address of the metadata (at runtime,
    // the metadata will have this address if the RO/RW regions are mapped at the
    // default location).

    Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
    Metadata* native_ptr = *buffered_field_addr;
    guarantee(native_ptr != nullptr, "sanity");

    if (RegeneratedClasses::has_been_regenerated(native_ptr)) {
      native_ptr = RegeneratedClasses::get_regenerated_object(native_ptr);
    }

    guarantee(ArchiveBuilder::current()->has_been_archived((address)native_ptr),
              "Metadata %p should have been archived", native_ptr);

    address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
    address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
    *buffered_field_addr = (Metadata*)requested_native_ptr;
  }

  heap_info->ptrmap()->resize(max_idx + 1);
  log_info(aot, heap)("calculate_ptrmap: marked %d non-null native pointers for heap region (%zu bits)",
                      num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}
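// A hedged sketch of the consumer side (mapped_heap_bottom and metadata_delta
// below are hypothetical names, shown only to motivate the bitmap layout): at
// runtime, if the metadata regions are not mapped at their requested
// addresses, a loader can visit each set bit and relocate one pointer-sized
// slot:
//
//   ptrmap->iterate([&](BitMap::idx_t idx) {
//     Metadata** field = (Metadata**)mapped_heap_bottom + idx;
//     *field = (Metadata*)((address)*field + metadata_delta);
//     return true;  // keep iterating
//   });
//
// i.e. bit N marks the N-th pointer-sized slot from the bottom of the heap
// region as holding a native Metadata pointer that needs relocation.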

#endif // INCLUDE_CDS_JAVA_HEAP