 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotReferenceObjSupport.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/modules.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/resourceHash.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t ArchiveHeapWriter::_buffer_used;

// Heap root segments
HeapRootSegments ArchiveHeapWriter::_heap_root_segments;

address ArchiveHeapWriter::_requested_bottom;
address ArchiveHeapWriter::_requested_top;

static size_t _num_strings = 0;
static size_t _string_bytes = 0;
static size_t _num_packages = 0;
static size_t _num_protection_domains = 0;

GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
GrowableArrayCHeap<ArchiveHeapWriter::HeapObjOrder, mtClassShared>* ArchiveHeapWriter::_source_objs_order;

ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
  ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;


typedef ResourceHashtable<
    size_t,    // offset of a filler from ArchiveHeapWriter::buffer_bottom()
    size_t,    // size of this filler (in bytes)
    127,       // prime number
    AnyObj::C_HEAP,
    mtClassShared> FillersTable;
static FillersTable* _fillers;
static int _num_native_ptrs = 0;

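// Dump-time initialization. The full GC requested below is so that heap
// archiving starts from a freshly collected heap.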
void ArchiveHeapWriter::init() {
  if (CDSConfig::is_dumping_heap()) {
    Universe::heap()->collect(GCCause::_java_lang_system_gc);

    _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
    _fillers = new FillersTable();
    _requested_bottom = nullptr;
    _requested_top = nullptr;

    _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);

    guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
  }
}

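// _source_objs holds raw oops, which would become stale if a subsequent GC
// moves the source objects; delete the table once it is no longer needed.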
void ArchiveHeapWriter::delete_tables_with_raw_oops() {
  delete _source_objs;
  _source_objs = nullptr;
}

void ArchiveHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

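// Write all archived heap objects into the output buffer, in four phases:
// allocate the buffer, copy the root segments and source objects into it,
// compute the requested address range, then relocate all embedded oops.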
void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                              ArchiveHeapInfo* heap_info) {
  assert(CDSConfig::is_dumping_heap(), "sanity");
  allocate_buffer();
  copy_source_objs_to_buffer(roots);
  set_requested_address(heap_info);
  relocate_embedded_oops(roots, heap_info);
}

bool ArchiveHeapWriter::is_too_large_to_archive(oop o) {
  return is_too_large_to_archive(o->size());
}

bool ArchiveHeapWriter::is_string_too_large_to_archive(oop string) {
  typeArrayOop value = java_lang_String::value_no_keepalive(string);
  return is_too_large_to_archive(value);

// ...

  } else {
    return false;
  }
}

// Various lookup functions between source_obj, buffered_obj and requested_obj
bool ArchiveHeapWriter::is_in_requested_range(oop o) {
  assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
  address a = cast_from_oop<address>(o);
  return (_requested_bottom <= a && a < _requested_top);
}

oop ArchiveHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
  oop req_obj = cast_to_oop(_requested_bottom + offset);
  assert(is_in_requested_range(req_obj), "must be");
  return req_obj;
}

oop ArchiveHeapWriter::source_obj_to_requested_obj(oop src_obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
  if (p != nullptr) {
    return requested_obj_from_buffer_offset(p->buffer_offset());
  } else {
    return nullptr;
  }
}

oop ArchiveHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
  OopHandle* oh = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
  if (oh != nullptr) {
    return oh->resolve();
  } else {
    return nullptr;
  }
}

address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
  return _requested_bottom + buffered_address_to_offset(buffered_addr);
}

address ArchiveHeapWriter::requested_address() {
  assert(_buffer != nullptr, "must be initialized");
  return _requested_bottom;
}

void ArchiveHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

// ...
  for (int i = 0; i < len; i++) {
    oop o = _source_objs->at(i);
    int rank = oop_sorting_rank(o);
    HeapObjOrder os = {i, rank};
    _source_objs_order->append(os);
  }
  log_info(aot)("computed ranks");
  _source_objs_order->sort(compare_objs_by_oop_fields);
  log_info(aot)("sorting heap objects done");
}

void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // There may be multiple root segments, each of which we want to be region-aligned.
  // Placing them ahead of the other objects ensures that no space is wasted on
  // alignment padding.
  copy_roots_to_buffer(roots);

  sort_source_objs();
  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);
    assert(buffer_offset <= 0x7fffffff, "sanity");
    HeapShared::add_to_permanent_oop_table(src_obj, (int)buffer_offset);

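    // Hold the source object in an OopHandle so that the table entry stays
    // valid even if a GC moves src_obj.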
    OopHandle handle(Universe::vm_global(), src_obj);
    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
    _buffer_offset_to_source_obj_table->maybe_grow();

    if (java_lang_Module::is_instance(src_obj)) {
      Modules::check_archived_module_oop(src_obj);
    }
  }

  log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
                _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
  log_info(cds)("  strings            = %8zu (%zu bytes)", _num_strings, _string_bytes);
  log_info(cds)("  packages           = %8zu", _num_packages);
  log_info(cds)("  protection domains = %8zu", _num_protection_domains);
}

size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}

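// Find the exact array length whose objArray occupies fill_bytes: start from
// the largest candidate length and search downwards until the sizes match.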
int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();

// ...
    _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
  }
}

size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

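// Write a value of type T directly into a field of an object that has already
// been copied into the output buffer.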
template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

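// Tally the archived strings, packages, and protection domains for the
// statistics printed at the end of copy_source_objs_to_buffer().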
void ArchiveHeapWriter::update_stats(oop src_obj) {
  if (java_lang_String::is_instance(src_obj)) {
    _num_strings++;
    _string_bytes += src_obj->size() * HeapWordSize;
    _string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
  } else {
    Klass* k = src_obj->klass();
    Symbol* name = k->name();
    if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
      _num_packages++;
    } else if (name->equals("java/security/ProtectionDomain")) {
      _num_protection_domains++;
    }
  }
}

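// Copy one source object into the output buffer and return its offset from
// buffer_bottom(). Padding is inserted first if the object would otherwise
// straddle a minimal GC region boundary.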
size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  update_stats(src_obj);

  assert(!is_too_large_to_archive(src_obj), "already checked");
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that no object could possibly span
  // across two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);

// ...
  // The bitmap covers the whole heap (total_bits bits), but set bits occur only
  // within [start ... end).
  size_t start = bitmap->find_first_set_bit(0);
  size_t end = bitmap->size();
  log_info(aot)("%s = %7zu ... %7zu (%3zu%% ... %3zu%% = %3zu%%)", which,
                start, end,
                start * 100 / total_bits,
                end * 100 / total_bits,
                (end - start) * 100 / total_bits);
}

// Update all oop fields embedded in the buffered objects
void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                               ArchiveHeapInfo* heap_info) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);

  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    oop requested_obj = requested_obj_from_buffer_offset(info->buffer_offset());
    update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
    address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
    EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
    src_obj->oop_iterate(&relocator);
  }

  // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
  // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
  for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
    size_t seg_offset = _heap_root_segments.segment_offset(seg_idx);

    objArrayOop requested_obj = (objArrayOop)requested_obj_from_buffer_offset(seg_offset);
    update_header_for_requested_obj(requested_obj, nullptr, Universe::objectArrayKlass());
    address buffered_obj = offset_to_buffered_address<address>(seg_offset);
    int length = _heap_root_segments.size_in_elems(seg_idx);

    if (UseCompressedOops) {
      for (int i = 0; i < length; i++) {

// ...
  compute_ptrmap(heap_info);

  size_t total_bytes = (size_t)_buffer->length();
  log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
  log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
}

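// Record a field of src_obj that holds a pointer to native Metadata (such as
// a Klass* or Method*). compute_ptrmap() later rewrites these fields to their
// requested addresses and marks them in the ptrmap bitmap.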
void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
  Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
  if (ptr != nullptr) {
    NativePointerInfo info;
    info._src_obj = src_obj;
    info._field_offset = field_offset;
    _native_pointers->append(info);
    HeapShared::set_has_native_pointers(src_obj);
    _num_native_ptrs++;
  }
}

// Do we have a jlong/jint field that's actually a pointer to a MetaspaceObj?
bool ArchiveHeapWriter::is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, address buffered_obj, int field_offset) {
  size_t offset = buffered_address_to_offset(buffered_obj) + checked_cast<size_t>(field_offset); // in bytes
  BitMap::idx_t idx = checked_cast<BitMap::idx_t>(offset) / HeapWordSize;
  // Leading zero bits have been trimmed from the ptrmap, so addresses below
  // heap_ptrmap_start_pos() are not covered by it.
  size_t start_pos = FileMapInfo::current_info()->heap_ptrmap_start_pos();
  if (idx < start_pos) {
    return false;
  } else {
    idx -= start_pos;
  }
  return (idx < heap_info->ptrmap()->size()) && (heap_info->ptrmap()->at(idx) == true);
}

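// Build the ptrmap bitmap: one bit per pointer-sized word of the requested
// address range. A set bit means the corresponding word is a field holding a
// native Metadata pointer; each such field is rewritten to the address the
// metadata will have when the RO/RW regions are mapped at the default location.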
void ArchiveHeapWriter::compute_ptrmap(ArchiveHeapInfo* heap_info) {
  int num_non_null_ptrs = 0;
  Metadata** bottom = (Metadata**) _requested_bottom;
  Metadata** top = (Metadata**) _requested_top; // exclusive
  heap_info->ptrmap()->resize(top - bottom);

  BitMap::idx_t max_idx = 32; // paranoid - don't make it too small
  for (int i = 0; i < _native_pointers->length(); i++) {
    NativePointerInfo info = _native_pointers->at(i);
    oop src_obj = info._src_obj;
    int field_offset = info._field_offset;
    HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
    // requested_field_addr = the address of this field in the requested space
    oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
    Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
    assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");

    // Mark this field in the bitmap
    BitMap::idx_t idx = requested_field_addr - bottom;
    heap_info->ptrmap()->set_bit(idx);
    num_non_null_ptrs++;
    max_idx = MAX2(max_idx, idx);

    // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
    // this address if the RO/RW regions are mapped at the default location).

    Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
    Metadata* native_ptr = *buffered_field_addr;
    guarantee(native_ptr != nullptr, "sanity");
    guarantee(ArchiveBuilder::current()->has_been_buffered((address)native_ptr),
              "Metadata %p should have been archived", native_ptr);

    if (RegeneratedClasses::has_been_regenerated((address)native_ptr)) {
      native_ptr = (Metadata*)RegeneratedClasses::get_regenerated_object((address)native_ptr);
    }

    address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
    address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
    *buffered_field_addr = (Metadata*)requested_native_ptr;
  }

  heap_info->ptrmap()->resize(max_idx + 1);
  log_info(aot, heap)("compute_ptrmap: marked %d non-null native pointers for heap region (%zu bits)",
                      num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}

#endif // INCLUDE_CDS_JAVA_HEAP