1 /*
2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/archiveHeapWriter.hpp"
27 #include "cds/cdsConfig.hpp"
28 #include "cds/filemap.hpp"
29 #include "cds/heapShared.hpp"
30 #include "classfile/systemDictionary.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "memory/iterator.inline.hpp"
33 #include "memory/oopFactory.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/compressedOops.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "oops/objArrayOop.inline.hpp"
38 #include "oops/oopHandle.inline.hpp"
39 #include "oops/typeArrayKlass.hpp"
40 #include "oops/typeArrayOop.hpp"
41 #include "runtime/java.hpp"
42 #include "runtime/mutexLocker.hpp"
43 #include "utilities/bitMap.inline.hpp"
44 #if INCLUDE_G1GC
45 #include "gc/g1/g1CollectedHeap.hpp"
46 #include "gc/g1/g1HeapRegion.hpp"
47 #endif
48
49 #if INCLUDE_CDS_JAVA_HEAP
50
51 GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;
52
53 // The following are offsets from buffer_bottom()
54 size_t ArchiveHeapWriter::_buffer_used;
55 size_t ArchiveHeapWriter::_heap_roots_offset;
56
57 size_t ArchiveHeapWriter::_heap_roots_word_size;
58
59 address ArchiveHeapWriter::_requested_bottom;
60 address ArchiveHeapWriter::_requested_top;
61
62 GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
63 GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
64 GrowableArrayCHeap<ArchiveHeapWriter::HeapObjOrder, mtClassShared>* ArchiveHeapWriter::_source_objs_order;
65
66 ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
67 ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
68
69
70 typedef ResourceHashtable<
71 size_t, // offset of a filler from ArchiveHeapWriter::buffer_bottom()
72 size_t, // size of this filler (in bytes)
73 127, // prime number
74 AnyObj::C_HEAP,
75 mtClassShared> FillersTable;
76 static FillersTable* _fillers;
77 static int _num_native_ptrs = 0;
78
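// One-time setup at dump time, before any heap objects are copied: a full GC
// is requested first (presumably so the source heap is as quiescent and compact
// as possible), then the writer's side tables are allocated. Only G1 is
// supported, and MIN_GC_REGION_ALIGNMENT may not exceed the smallest possible
// G1 region, so a buffered object can never straddle a runtime region boundary.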
79 void ArchiveHeapWriter::init() {
80 if (HeapShared::can_write()) {
81 Universe::heap()->collect(GCCause::_java_lang_system_gc);
82
83 _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
84 _fillers = new FillersTable();
85 _requested_bottom = nullptr;
86 _requested_top = nullptr;
87
88 _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
89 _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);
90
91 guarantee(UseG1GC, "implementation limitation");
92 guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
93 }
94 }
95
96 void ArchiveHeapWriter::add_source_obj(oop src_obj) {
97 _source_objs->append(src_obj);
98 }
99
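// Top-level driver for writing the archived heap. The phases below must run in
// this order: allocate_buffer() reserves the in-memory staging buffer,
// copy_source_objs_to_buffer() copies every archived object (plus the roots
// array) into it, set_requested_address() chooses the address range that the
// image will ask to be mapped at, and relocate_embedded_oops() rewrites all
// oop fields to point into that "requested" range.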
100 void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
101 ArchiveHeapInfo* heap_info) {
102 assert(HeapShared::can_write(), "sanity");
103 allocate_buffer();
104 copy_source_objs_to_buffer(roots);
105 set_requested_address(heap_info);
106 relocate_embedded_oops(roots, heap_info);
107 }
108
109 bool ArchiveHeapWriter::is_too_large_to_archive(oop o) {
110 return is_too_large_to_archive(o->size());
111 }
112
113 bool ArchiveHeapWriter::is_string_too_large_to_archive(oop string) {
114 typeArrayOop value = java_lang_String::value_no_keepalive(string);
115 return is_too_large_to_archive(value);
116 }
117
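// Worked example for is_too_large_to_archive(size_t) below (illustrative only;
// MIN_GC_REGION_ALIGNMENT is assumed to be 1*M here, its actual value is not
// shown in this file):
//
//   size_t words = (1*M + HeapWordSize) / HeapWordSize;  // one word over 1 MB
//   // byte_size == 1*M + HeapWordSize, which exceeds MIN_GC_REGION_ALIGNMENT,
//   // so is_too_large_to_archive(words) returns true.
//
// The limit exists because an archived object must fit inside a single
// minimally-sized GC region (see the asserts in copy_one_source_obj_to_buffer()).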
118 bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) {
119 assert(size > 0, "no zero-size object");
120 assert(size * HeapWordSize > size, "no overflow");
121 static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");
122
123 size_t byte_size = size * HeapWordSize;
124 if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
125 return true;
126 } else {
169 }
170
171 address ArchiveHeapWriter::requested_address() {
172 assert(_buffer != nullptr, "must be initialized");
173 return _requested_bottom;
174 }
175
176 void ArchiveHeapWriter::allocate_buffer() {
177 int initial_buffer_size = 100000;
178 _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
179 _buffer_used = 0;
180 ensure_buffer_space(1); // so that buffer_bottom() works
181 }
182
183 void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
184 // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
185 guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
186 _buffer->at_grow(to_array_index(min_bytes));
187 }
188
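// Materializes HeapShared::roots() as an objArrayOop directly inside the
// staging buffer. The buffer lies outside the real Java heap, so the array
// cannot go through the normal allocation path; its mark word, klass, length
// and elements are written by hand below.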
189 void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
190 Klass* k = Universe::objectArrayKlass(); // already relocated to point to archived klass
191 int length = roots->length();
192 _heap_roots_word_size = objArrayOopDesc::object_size(length);
193 size_t byte_size = _heap_roots_word_size * HeapWordSize;
194 if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
195 log_error(cds, heap)("roots array is too large. Please reduce the number of classes");
196 vm_exit(1);
197 }
198
199 maybe_fill_gc_region_gap(byte_size);
200
201 size_t new_used = _buffer_used + byte_size;
202 ensure_buffer_space(new_used);
203
204 HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
205 memset(mem, 0, byte_size);
206 {
207 // This is copied from MemAllocator::finish
208 oopDesc::set_mark(mem, markWord::prototype());
209 oopDesc::release_set_klass(mem, k);
210 }
211 {
212 // This is copied from ObjArrayAllocator::initialize
213 arrayOopDesc::set_length(mem, length);
214 }
215
216 objArrayOop arrayOop = objArrayOop(cast_to_oop(mem));
217 for (int i = 0; i < length; i++) {
218 // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside of the real heap!
219 oop o = roots->at(i);
220 if (UseCompressedOops) {
221 * arrayOop->obj_at_addr<narrowOop>(i) = CompressedOops::encode(o);
222 } else {
223 * arrayOop->obj_at_addr<oop>(i) = o;
224 }
225 }
226 log_info(cds, heap)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);
227
228 _heap_roots_offset = _buffer_used;
229 _buffer_used = new_used;
230 }
231
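// Rank used to sort the source objects by the kind of pointers they contain:
//
//   0: neither oop fields nor native pointers (nothing to relocate)
//   1: native pointers only
//   2: both oop fields and native pointers
//   3: oop fields only
//
// Grouping objects this way appears intended to keep the set bits of the
// relocation bitmaps (oopmap/ptrmap) clustered; see log_bitmap_usage().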
232 static int oop_sorting_rank(oop o) {
233 bool has_oop_ptr, has_native_ptr;
234 HeapShared::get_pointer_info(o, has_oop_ptr, has_native_ptr);
235
236 if (!has_oop_ptr) {
237 if (!has_native_ptr) {
238 return 0;
239 } else {
240 return 1;
241 }
242 } else {
243 if (has_native_ptr) {
244 return 2;
245 } else {
246 return 3;
247 }
248 }
249 }
264 return a->_index - b->_index;
265 }
266 }
267
268 void ArchiveHeapWriter::sort_source_objs() {
269 log_info(cds)("sorting heap objects");
270 int len = _source_objs->length();
271 _source_objs_order = new GrowableArrayCHeap<HeapObjOrder, mtClassShared>(len);
272
273 for (int i = 0; i < len; i++) {
274 oop o = _source_objs->at(i);
275 int rank = oop_sorting_rank(o);
276 HeapObjOrder os = {i, rank};
277 _source_objs_order->append(os);
278 }
279 log_info(cds)("computed ranks");
280 _source_objs_order->sort(compare_objs_by_oop_fields);
281 log_info(cds)("sorting heap objects done");
282 }
283
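// Copies every source object into the staging buffer in sorted order. Each
// object's buffer offset is recorded twice: in its CachedOopInfo (for
// source-to-buffer lookups) and in _buffer_offset_to_source_obj_table (for the
// reverse direction). The roots array is appended after all source objects.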
284 void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
285 sort_source_objs();
286 for (int i = 0; i < _source_objs_order->length(); i++) {
287 int src_obj_index = _source_objs_order->at(i)._index;
288 oop src_obj = _source_objs->at(src_obj_index);
289 HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
290 assert(info != nullptr, "must be");
291 size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
292 info->set_buffer_offset(buffer_offset);
293
294 _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, src_obj);
295 _buffer_offset_to_source_obj_table->maybe_grow();
296 }
297
298 copy_roots_to_buffer(roots);
299
300 log_info(cds)("Size of heap region = " SIZE_FORMAT " bytes, %d objects, %d roots, %d native ptrs",
301 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
302 }
303
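// Gaps left at MIN_GC_REGION_ALIGNMENT boundaries are plugged with dummy
// object arrays ("fillers"). filler_array_length() searches downward for the
// length whose total byte size exactly equals fill_bytes: the initial guess of
// fill_bytes / elemSize overshoots, because the array header also consumes
// part of fill_bytes.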
304 size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
305 size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
306 return byte_size;
307 }
308
309 int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
310 assert(is_object_aligned(fill_bytes), "must be");
311 size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
312
313 int initial_length = to_array_length(fill_bytes / elemSize);
314 for (int length = initial_length; length >= 0; length --) {
315 size_t array_byte_size = filler_array_byte_size(length);
316 if (array_byte_size == fill_bytes) {
317 return length;
318 }
319 }
320
321 ShouldNotReachHere();
366 _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
367 }
368 }
369
370 size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
371 size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
372 if (p != nullptr) {
373 assert(*p > 0, "filler must be larger than zero bytes");
374 return *p;
375 } else {
376 return 0; // buffered_addr is not a filler
377 }
378 }
379
380 template <typename T>
381 void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
382 T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
383 *field_addr = value;
384 }
385
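// Copies one source object into the staging buffer and returns its offset from
// buffer_bottom(). A filler may be inserted first so that the copy never
// crosses a MIN_GC_REGION_ALIGNMENT boundary (asserted below).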
386 size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
387 assert(!is_too_large_to_archive(src_obj), "already checked");
388 size_t byte_size = src_obj->size() * HeapWordSize;
389 assert(byte_size > 0, "no zero-size objects");
390
391 // For region-based collectors such as G1, the archive heap may be mapped into
392 // multiple regions. We need to make sure that we don't have an object that could possibly
393 // span two regions.
394 maybe_fill_gc_region_gap(byte_size);
395
396 size_t new_used = _buffer_used + byte_size;
397 assert(new_used > _buffer_used, "no wrap around");
398
399 size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
400 size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
401 assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
402
403 ensure_buffer_space(new_used);
404
405 address from = cast_from_oop<address>(src_obj);
406 address to = offset_to_buffered_address<address>(_buffer_used);
483
484 inline void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
485 *buffered_addr = requested_obj;
486 }
487
488 inline void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
489 narrowOop val = CompressedOops::encode_not_null(requested_obj);
490 *buffered_addr = val;
491 }
492
493 oop ArchiveHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
494 return *buffered_addr;
495 }
496
497 oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
498 return CompressedOops::decode(*buffered_addr);
499 }
500
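// Relocates a single oop field in a buffered object: the field still holds a
// "source" heap address, which is replaced with the address the referent will
// have in the requested mapping; the field's position is then recorded in the
// oopmap so that the runtime can locate (and, if the archive is mapped at a
// different address, re-adjust) every such pointer.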
501 template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap) {
502 oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
503 if (!CompressedOops::is_null(source_referent)) {
504 oop request_referent = source_obj_to_requested_obj(source_referent);
505 store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
506 mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
507 }
508 }
509
510 template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
511 T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
512 address requested_region_bottom;
513
514 assert(request_p >= (T*)_requested_bottom, "sanity");
515 assert(request_p < (T*)_requested_top, "sanity");
516 requested_region_bottom = _requested_bottom;
517
518 // Mark the pointer in the oopmap
519 T* region_bottom = (T*)requested_region_bottom;
520 assert(request_p >= region_bottom, "must be");
521 BitMap::idx_t idx = request_p - region_bottom;
522 assert(idx < oopmap->size(), "overflow");
523 oopmap->set_bit(idx);
527 assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
528 narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
529 address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
530
531 oop fake_oop = cast_to_oop(buffered_addr);
532 fake_oop->set_narrow_klass(nk);
533
534 // We need to retain the identity_hash, because it may have been used by some hashtables
535 // in the shared heap.
536 if (src_obj != nullptr && !src_obj->fast_no_hash_check()) {
537 intptr_t src_hash = src_obj->identity_hash();
538 fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
539 assert(fake_oop->mark().is_unlocked(), "sanity");
540
541 DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
542 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
543 }
544 }
545
546 // Relocate an element in the buffered copy of HeapShared::roots()
547 template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap) {
548 size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
549 relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset), oopmap);
550 }
551
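// Visits every oop field of a source object and relocates the corresponding
// field in the buffered copy. Field offsets are computed against the source
// object and then applied to the buffered address, so the source heap itself
// is never modified.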
552 class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
553 oop _src_obj;
554 address _buffered_obj;
555 CHeapBitMap* _oopmap;
556
557 public:
558 EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
559 _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap) {}
560
561 void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
562 void do_oop( oop *p) { EmbeddedOopRelocator::do_oop_work(p); }
563
564 private:
565 template <class T> void do_oop_work(T *p) {
566 size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
567 ArchiveHeapWriter::relocate_field_in_buffer<T>((T*)(_buffered_obj + field_offset), _oopmap);
568 }
569 };
570
571 static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
572 // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
573 size_t start = bitmap->find_first_set_bit(0);
574 size_t end = bitmap->size();
575 log_info(cds)("%s = " SIZE_FORMAT_W(7) " ... " SIZE_FORMAT_W(7) " (%3zu%% ... %3zu%% = %3zu%%)", which,
576 start, end,
577 start * 100 / total_bits,
578 end * 100 / total_bits,
579 (end - start) * 100 / total_bits);
580 }
581
582 // Update all oop fields embedded in the buffered objects
583 void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
584 ArchiveHeapInfo* heap_info) {
585 size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
586 size_t heap_region_byte_size = _buffer_used;
587 heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);
588
589 for (int i = 0; i < _source_objs_order->length(); i++) {
590 int src_obj_index = _source_objs_order->at(i)._index;
591 oop src_obj = _source_objs->at(src_obj_index);
592 HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
593 assert(info != nullptr, "must be");
594 oop requested_obj = requested_obj_from_buffer_offset(info->buffer_offset());
595 update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
596 address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
597 EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
598 src_obj->oop_iterate(&relocator);
599 }
600
601 // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
602 // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
603 oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_offset);
604 update_header_for_requested_obj(requested_roots, nullptr, Universe::objectArrayKlass());
605 int length = roots != nullptr ? roots->length() : 0;
606 for (int i = 0; i < length; i++) {
607 if (UseCompressedOops) {
608 relocate_root_at<narrowOop>(requested_roots, i, heap_info->oopmap());
609 } else {
610 relocate_root_at<oop>(requested_roots, i, heap_info->oopmap());
611 }
612 }
613
614 compute_ptrmap(heap_info);
615
616 size_t total_bytes = (size_t)_buffer->length();
617 log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
618 log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
619 }
620
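// Records a non-null Metadata* field (for example, the hidden Klass* field of
// a java.lang.Class mirror) so that compute_ptrmap() can later rewrite it to
// its requested address and mark its position in the ptrmap.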
621 void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
622 Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
623 if (ptr != nullptr) {
624 NativePointerInfo info;
625 info._src_obj = src_obj;
626 info._field_offset = field_offset;
627 _native_pointers->append(info);
628 HeapShared::set_has_native_pointers(src_obj);
629 _num_native_ptrs ++;
630 }
631 }
632
633 // Do we have a jlong/jint field that's actually a pointer to a MetaspaceObj?
634 bool ArchiveHeapWriter::is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset) {
635 HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
636 assert(p != nullptr, "must be");
637
638 // requested_field_addr = the address of this field in the requested space
639 oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
640 Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
641 assert((Metadata**)_requested_bottom <= requested_field_addr && requested_field_addr < (Metadata**) _requested_top, "range check");
642
643 BitMap::idx_t idx = requested_field_addr - (Metadata**) _requested_bottom;
644 // Leading zeros have been removed, so some addresses may not be in the ptrmap
645 size_t start_pos = FileMapInfo::current_info()->heap_ptrmap_start_pos();
646 if (idx < start_pos) {
647 return false;
664 int field_offset = info._field_offset;
665 HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
666 // requested_field_addr = the address of this field in the requested space
667 oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
668 Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
669 assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");
670
671 // Mark this field in the bitmap
672 BitMap::idx_t idx = requested_field_addr - bottom;
673 heap_info->ptrmap()->set_bit(idx);
674 num_non_null_ptrs ++;
675 max_idx = MAX2(max_idx, idx);
676
677 // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
678 // this address if the RO/RW regions are mapped at the default location).
679
680 Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
681 Metadata* native_ptr = *buffered_field_addr;
682 assert(native_ptr != nullptr, "sanity");
683
684 address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
685 address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
686 *buffered_field_addr = (Metadata*)requested_native_ptr;
687 }
688
689 heap_info->ptrmap()->resize(max_idx + 1);
690 log_info(cds, heap)("compute_ptrmap: marked %d non-null native pointers for heap region (" SIZE_FORMAT " bits)",
691 num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
692 }
693
694 #endif // INCLUDE_CDS_JAVA_HEAP
1 /*
2 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/archiveHeapWriter.hpp"
27 #include "cds/cdsConfig.hpp"
28 #include "cds/filemap.hpp"
29 #include "cds/heapShared.hpp"
30 #include "cds/regeneratedClasses.hpp"
31 #include "classfile/javaClasses.hpp"
32 #include "classfile/systemDictionary.hpp"
33 #include "gc/shared/collectedHeap.hpp"
34 #include "memory/iterator.inline.hpp"
35 #include "memory/oopFactory.hpp"
36 #include "memory/universe.hpp"
37 #include "oops/compressedOops.hpp"
38 #include "oops/oop.inline.hpp"
39 #include "oops/objArrayOop.inline.hpp"
40 #include "oops/oopHandle.inline.hpp"
41 #include "oops/typeArrayKlass.hpp"
42 #include "oops/typeArrayOop.hpp"
43 #include "runtime/java.hpp"
44 #include "runtime/mutexLocker.hpp"
45 #include "utilities/bitMap.inline.hpp"
46 #if INCLUDE_G1GC
47 #include "gc/g1/g1CollectedHeap.hpp"
48 #include "gc/g1/g1HeapRegion.hpp"
49 #endif
50
51 #if INCLUDE_CDS_JAVA_HEAP
52
53 GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;
54
55 // The following are offsets from buffer_bottom()
56 size_t ArchiveHeapWriter::_buffer_used;
57 size_t ArchiveHeapWriter::_heap_roots_offset;
58
59 size_t ArchiveHeapWriter::_heap_roots_word_size;
60
61 address ArchiveHeapWriter::_requested_bottom;
62 address ArchiveHeapWriter::_requested_top;
63
64 static size_t _num_strings = 0;
65 static size_t _string_bytes = 0;
66 static size_t _num_packages = 0;
67
68 GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
69 GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
70 GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_perm_objs = nullptr;
71 GrowableArrayCHeap<ArchiveHeapWriter::HeapObjOrder, mtClassShared>* ArchiveHeapWriter::_source_objs_order;
72
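// Per-segment bookkeeping for the "permanent object" (permobj) segments:
// despite its name, _permobj_seg_buffered_addrs stores each segment's offset
// from buffer_bottom(), alongside its byte size and element count.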
73 static GrowableArrayCHeap<size_t, mtClassShared> *_permobj_seg_buffered_addrs = nullptr;
74 static GrowableArrayCHeap<size_t, mtClassShared> *_permobj_seg_bytesizes = nullptr;
75 static GrowableArrayCHeap<int, mtClassShared> *_permobj_seg_lengths = nullptr;
76
77 ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
78 ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
79
80
81 typedef ResourceHashtable<
82 size_t, // offset of a filler from ArchiveHeapWriter::buffer_bottom()
83 size_t, // size of this filler (in bytes)
84 127, // prime number
85 AnyObj::C_HEAP,
86 mtClassShared> FillersTable;
87 static FillersTable* _fillers;
88 static int _num_native_ptrs = 0;
89
90 void ArchiveHeapWriter::init() {
91 if (HeapShared::can_write()) {
92 Universe::heap()->collect(GCCause::_java_lang_system_gc);
93
94 _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
95 _fillers = new FillersTable();
96 _requested_bottom = nullptr;
97 _requested_top = nullptr;
98
99 _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
100 _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);
101
102 _permobj_seg_buffered_addrs = new GrowableArrayCHeap<size_t, mtClassShared>(5);
103 _permobj_seg_bytesizes = new GrowableArrayCHeap<size_t, mtClassShared>(5);
104 _permobj_seg_lengths = new GrowableArrayCHeap<int, mtClassShared>(5);
105
106 guarantee(UseG1GC, "implementation limitation");
107 guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
108 }
109 }
110
111 void ArchiveHeapWriter::add_source_obj(oop src_obj) {
112 _source_objs->append(src_obj);
113 }
114
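// This variant of write() also creates the permobj segments:
// copy_source_objs_to_buffer() reports how many permanent objects were
// buffered, and add_permobj_segments_to_roots() then patches references to the
// segments into the extra slots reserved at the tail of the roots array.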
115 void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
116 ArchiveHeapInfo* heap_info) {
117 ResourceMark rm;
118 GrowableArray<size_t> permobj_seg_offsets;
119 assert(HeapShared::can_write(), "sanity");
120 allocate_buffer();
121 int num_permobj = copy_source_objs_to_buffer(roots, &permobj_seg_offsets);
122 set_requested_address(heap_info);
123 relocate_embedded_oops(roots, heap_info, &permobj_seg_offsets, num_permobj);
124 if (UseCompressedOops) {
125 add_permobj_segments_to_roots<narrowOop>(roots, heap_info, &permobj_seg_offsets);
126 } else {
127 add_permobj_segments_to_roots<oop>(roots, heap_info, &permobj_seg_offsets);
128 }
129 }
130
131 bool ArchiveHeapWriter::is_too_large_to_archive(oop o) {
132 return is_too_large_to_archive(o->size());
133 }
134
135 bool ArchiveHeapWriter::is_string_too_large_to_archive(oop string) {
136 typeArrayOop value = java_lang_String::value_no_keepalive(string);
137 return is_too_large_to_archive(value);
138 }
139
140 bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) {
141 assert(size > 0, "no zero-size object");
142 assert(size * HeapWordSize > size, "no overflow");
143 static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");
144
145 size_t byte_size = size * HeapWordSize;
146 if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
147 return true;
148 } else {
191 }
192
193 address ArchiveHeapWriter::requested_address() {
194 assert(_buffer != nullptr, "must be initialized");
195 return _requested_bottom;
196 }
197
198 void ArchiveHeapWriter::allocate_buffer() {
199 int initial_buffer_size = 100000;
200 _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
201 _buffer_used = 0;
202 ensure_buffer_space(1); // so that buffer_bottom() works
203 }
204
205 void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
206 // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
207 guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
208 _buffer->at_grow(to_array_index(min_bytes));
209 }
210
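// Materializes an objArrayOop in the staging buffer holding the elements
// input[from ... from + num_elms), optionally reserving extra_length trailing
// slots; used for both HeapShared::roots() (which reserves tail slots for the
// permobj segment references) and the permobj segments themselves. Returns the
// buffer offset of the new array and reports its size via objarray_word_size.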
211 size_t ArchiveHeapWriter::create_objarray_in_buffer(GrowableArrayCHeap<oop, mtClassShared>* input,
212 int from, // copy from this index in input
213 int num_elms, // copy this number of elements from input
214 int extra_length, // add extra elements at the end of the copy
215 size_t& objarray_word_size) {
216 Klass* k = Universe::objectArrayKlass(); // already relocated to point to archived klass
217 int length = num_elms + extra_length;
218 objarray_word_size = objArrayOopDesc::object_size(length);
219 size_t byte_size = objarray_word_size * HeapWordSize;
220 if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
221 log_error(cds, heap)("input array is too large. Please reduce the number of classes");
222 vm_exit(1);
223 }
224
225 maybe_fill_gc_region_gap(byte_size);
226
227 size_t new_used = _buffer_used + byte_size;
228 ensure_buffer_space(new_used);
229
230 HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
231 memset(mem, 0, byte_size);
232 {
233 // This is copied from MemAllocator::finish
234 oopDesc::set_mark(mem, markWord::prototype());
235 oopDesc::release_set_klass(mem, k);
236 }
237 {
238 // This is copied from ObjArrayAllocator::initialize
239 arrayOopDesc::set_length(mem, length);
240 }
241
242 objArrayOop arrayOop = objArrayOop(cast_to_oop(mem));
243 for (int i = 0; i < num_elms; i++) {
244 // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside of the real heap!
245 oop o = input->at(i + from);
246 if (UseCompressedOops) {
247 * arrayOop->obj_at_addr<narrowOop>(i) = CompressedOops::encode(o);
248 } else {
249 * arrayOop->obj_at_addr<oop>(i) = o;
250 }
251 }
252 log_info(cds, heap)("archived objArray[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);
253
254 size_t roots_bottom_offset = _buffer_used;
255 _buffer_used = new_used;
256
257 return roots_bottom_offset;
258 }
259
260 static int oop_sorting_rank(oop o) {
261 bool has_oop_ptr, has_native_ptr;
262 HeapShared::get_pointer_info(o, has_oop_ptr, has_native_ptr);
263
264 if (!has_oop_ptr) {
265 if (!has_native_ptr) {
266 return 0;
267 } else {
268 return 1;
269 }
270 } else {
271 if (has_native_ptr) {
272 return 2;
273 } else {
274 return 3;
275 }
276 }
277 }
292 return a->_index - b->_index;
293 }
294 }
295
296 void ArchiveHeapWriter::sort_source_objs() {
297 log_info(cds)("sorting heap objects");
298 int len = _source_objs->length();
299 _source_objs_order = new GrowableArrayCHeap<HeapObjOrder, mtClassShared>(len);
300
301 for (int i = 0; i < len; i++) {
302 oop o = _source_objs->at(i);
303 int rank = oop_sorting_rank(o);
304 HeapObjOrder os = {i, rank};
305 _source_objs_order->append(os);
306 }
307 log_info(cds)("computed ranks");
308 _source_objs_order->sort(compare_objs_by_oop_fields);
309 log_info(cds)("sorting heap objects done");
310 }
311
312 int ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots, GrowableArray<size_t>* permobj_seg_offsets) {
313 sort_source_objs();
314 _perm_objs = new GrowableArrayCHeap<oop, mtClassShared>();
315 for (int i = 0; i < _source_objs_order->length(); i++) {
316 int src_obj_index = _source_objs_order->at(i)._index;
317 oop src_obj = _source_objs->at(src_obj_index);
318 HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
319 assert(info != nullptr, "must be");
320 size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
321 info->set_buffer_offset(buffer_offset);
322
323 _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, src_obj);
324 _buffer_offset_to_source_obj_table->maybe_grow();
325 if (UsePermanentHeapObjects) {
326 // TODO: add only the objects that are needed by AOT. (How??)
327 int perm_index = _perm_objs->length();
328 HeapShared::add_to_permanent_index_table(src_obj, perm_index);
329 _perm_objs->append(src_obj);
330 }
331 }
332
333 // Create HeapShared::roots() in the output buffer. Reserve some extra slots at the end of it
334 // for the permobj_segments
335 int permobj_segments = (_perm_objs->length() + PERMOBJ_SEGMENT_MAX_LENGTH - 1) / PERMOBJ_SEGMENT_MAX_LENGTH;
336 _heap_roots_offset = create_objarray_in_buffer(roots, 0, roots->length(), permobj_segments, _heap_roots_word_size);
337
338 // Create the permobj_segments in the output buffer.
339 for (int from = 0; from < _perm_objs->length(); from += PERMOBJ_SEGMENT_MAX_LENGTH) {
340 int num_elems = MIN2(PERMOBJ_SEGMENT_MAX_LENGTH, _perm_objs->length() - from);
341 size_t word_size;
342 size_t permobj_seg_bottom_offset = create_objarray_in_buffer(_perm_objs, from, num_elems, 0, word_size);
343 permobj_seg_offsets->append(permobj_seg_bottom_offset);
344 _permobj_seg_buffered_addrs->append(permobj_seg_bottom_offset);
345 _permobj_seg_bytesizes->append(word_size * HeapWordSize);
346 _permobj_seg_lengths->append(num_elems);
347 }
348
349 log_info(cds)("Size of heap region = " SIZE_FORMAT " bytes, %d objects, %d roots, %d native ptrs, %d permobjs in %d segments",
350 _buffer_used, _source_objs->length() + 2, roots->length(), _num_native_ptrs, _perm_objs->length(), permobj_segments);
351 log_info(cds)(" strings = " SIZE_FORMAT_W(8) " (" SIZE_FORMAT " bytes)", _num_strings, _string_bytes);
352 log_info(cds)(" packages = " SIZE_FORMAT_W(8), _num_packages);
353
354 assert(permobj_seg_offsets->length() == permobj_segments, "sanity");
355 HeapShared::set_permobj_segments(permobj_segments);
356 int n = _perm_objs->length();
357 return n;
358 }
359
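// Illustrative example of the segment arithmetic in copy_source_objs_to_buffer()
// above (PERMOBJ_SEGMENT_MAX_LENGTH is assumed to be 1000 here; its actual
// value is not shown in this file): 2500 permanent objects yield
// (2500 + 999) / 1000 = 3 segments holding 1000, 1000 and 500 elements, and
// the roots array reserves 3 extra trailing slots for them.
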
360 oop ArchiveHeapWriter::get_perm_object_by_index(int permanent_index) {
361 if (_perm_objs != nullptr && 0 <= permanent_index && permanent_index < _perm_objs->length()) {
362 return _perm_objs->at(permanent_index);
363 } else {
364 return nullptr;
365 }
366 }
367
368 size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
369 size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
370 return byte_size;
371 }
372
373 int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
374 assert(is_object_aligned(fill_bytes), "must be");
375 size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
376
377 int initial_length = to_array_length(fill_bytes / elemSize);
378 for (int length = initial_length; length >= 0; length --) {
379 size_t array_byte_size = filler_array_byte_size(length);
380 if (array_byte_size == fill_bytes) {
381 return length;
382 }
383 }
384
385 ShouldNotReachHere();
430 _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
431 }
432 }
433
434 size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
435 size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
436 if (p != nullptr) {
437 assert(*p > 0, "filler must be larger than zero bytes");
438 return *p;
439 } else {
440 return 0; // buffered_addr is not a filler
441 }
442 }
443
444 template <typename T>
445 void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
446 T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
447 *field_addr = value;
448 }
449
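// Gathers the statistics logged by copy_source_objs_to_buffer(): strings are
// counted together with their backing value arrays, and java.lang.NamedPackage
// and java.lang.Package instances are counted separately.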
450 void ArchiveHeapWriter::update_stats(oop src_obj) {
451 if (java_lang_String::is_instance(src_obj)) {
452 _num_strings ++;
453 _string_bytes += src_obj->size() * HeapWordSize;
454 _string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
455 } else {
456 Klass* k = src_obj->klass();
457 Symbol* name = k->name();
458 if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
459 _num_packages ++;
460 }
461 }
462 }
463
464 size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
465 update_stats(src_obj);
466
467 assert(!is_too_large_to_archive(src_obj), "already checked");
468 size_t byte_size = src_obj->size() * HeapWordSize;
469 assert(byte_size > 0, "no zero-size objects");
470
471 // For region-based collectors such as G1, the archive heap may be mapped into
472 // multiple regions. We need to make sure that we don't have an object that could possibly
473 // span two regions.
474 maybe_fill_gc_region_gap(byte_size);
475
476 size_t new_used = _buffer_used + byte_size;
477 assert(new_used > _buffer_used, "no wrap around");
478
479 size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
480 size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
481 assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
482
483 ensure_buffer_space(new_used);
484
485 address from = cast_from_oop<address>(src_obj);
486 address to = offset_to_buffered_address<address>(_buffer_used);
563
564 inline void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
565 *buffered_addr = requested_obj;
566 }
567
568 inline void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
569 narrowOop val = CompressedOops::encode_not_null(requested_obj);
570 *buffered_addr = val;
571 }
572
573 oop ArchiveHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
574 return *buffered_addr;
575 }
576
577 oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
578 return CompressedOops::decode(*buffered_addr);
579 }
580
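// In this variant, a referent that is a java.lang.Class mirror is first
// redirected to its scratch copy (the mirror instance that actually gets
// archived) before the requested address is computed.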
581 template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap) {
582 oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
583 if (source_referent != nullptr) {
584 if (java_lang_Class::is_instance(source_referent)) {
585 source_referent = HeapShared::scratch_java_mirror(source_referent);
586 assert(source_referent != nullptr, "must be");
587 }
588 oop request_referent = source_obj_to_requested_obj(source_referent);
589 store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
590 mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
591 }
592 }
593
594 template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
595 T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
596 address requested_region_bottom;
597
598 assert(request_p >= (T*)_requested_bottom, "sanity");
599 assert(request_p < (T*)_requested_top, "sanity");
600 requested_region_bottom = _requested_bottom;
601
602 // Mark the pointer in the oopmap
603 T* region_bottom = (T*)requested_region_bottom;
604 assert(request_p >= region_bottom, "must be");
605 BitMap::idx_t idx = request_p - region_bottom;
606 assert(idx < oopmap->size(), "overflow");
607 oopmap->set_bit(idx);
611 assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
612 narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
613 address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
614
615 oop fake_oop = cast_to_oop(buffered_addr);
616 fake_oop->set_narrow_klass(nk);
617
618 // We need to retain the identity_hash, because it may have been used by some hashtables
619 // in the shared heap.
620 if (src_obj != nullptr && !src_obj->fast_no_hash_check()) {
621 intptr_t src_hash = src_obj->identity_hash();
622 fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
623 assert(fake_oop->mark().is_unlocked(), "sanity");
624
625 DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
626 assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
627 }
628 }
629
630 // Relocate an element in the buffered copy of HeapShared::roots()
631 template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, address buffered_roots_addr, int index, CHeapBitMap* oopmap) {
632 size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
633 relocate_field_in_buffer<T>((T*)(buffered_roots_addr + offset), oopmap);
634 }
635
636 class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
637 oop _src_obj;
638 address _buffered_obj;
639 CHeapBitMap* _oopmap;
640
641 public:
642 EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
643 _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap) {}
644
645 void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
646 void do_oop( oop *p) { EmbeddedOopRelocator::do_oop_work(p); }
647
648 private:
649 template <class T> void do_oop_work(T *p) {
650 size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
651 ArchiveHeapWriter::relocate_field_in_buffer<T>((T*)(_buffered_obj + field_offset), _oopmap);
652 }
653 };
654
655 static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
656 // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
657 size_t start = bitmap->find_first_set_bit(0);
658 size_t end = bitmap->size();
659 log_info(cds)("%s = " SIZE_FORMAT_W(7) " ... " SIZE_FORMAT_W(7) " (%3zu%% ... %3zu%% = %3zu%%)", which,
660 start, end,
661 start * 100 / total_bits,
662 end * 100 / total_bits,
663 (end - start) * 100 / total_bits);
664 }
665
666 // Update all oop fields embedded in the buffered objects
667 void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
668 ArchiveHeapInfo* heap_info,
669 GrowableArray<size_t>* permobj_seg_offsets,
670 int num_permobjs) {
671 size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
672 size_t heap_region_byte_size = _buffer_used;
673 heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);
674
675 for (int i = 0; i < _source_objs_order->length(); i++) {
676 int src_obj_index = _source_objs_order->at(i)._index;
677 oop src_obj = _source_objs->at(src_obj_index);
678 HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
679 assert(info != nullptr, "must be");
680 oop requested_obj = requested_obj_from_buffer_offset(info->buffer_offset());
681 update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
682 address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
683 EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
684 src_obj->oop_iterate(&relocator);
685 }
686
687 // Relocate HeapShared::roots(), which is created in create_objarray_in_buffer() and
688 // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
689 oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_offset);
690 update_header_for_requested_obj(requested_roots, nullptr, Universe::objectArrayKlass());
691 int length = roots != nullptr ? roots->length() : 0;
692 for (int i = 0; i < length; i++) {
693 if (UseCompressedOops) {
694 relocate_root_at<narrowOop>(requested_roots, buffered_heap_roots_addr(), i, heap_info->oopmap());
695 } else {
696 relocate_root_at<oop>(requested_roots, buffered_heap_roots_addr(), i, heap_info->oopmap());
697 }
698 }
699
700 int num_permobjs_relocated = 0;
701 for (int i = 0; i < permobj_seg_offsets->length(); i++) {
702 int length = MIN2(PERMOBJ_SEGMENT_MAX_LENGTH, num_permobjs - num_permobjs_relocated);
703 // Relocate each of the segments. They were created in create_objarray_in_buffer() and
704 // don't have a corresponding src_obj, so we can't use EmbeddedOopRelocator.
705 size_t permobj_seg_bottom_offset = permobj_seg_offsets->at(i);
706 oop requested_permobj_seg = requested_obj_from_buffer_offset(permobj_seg_bottom_offset);
707 update_header_for_requested_obj(requested_permobj_seg, nullptr, Universe::objectArrayKlass());
708 for (int j = 0; j < length; j++) {
709 address buffered_addr = offset_to_buffered_address<address>(permobj_seg_bottom_offset);
710 if (UseCompressedOops) {
711 relocate_root_at<narrowOop>(requested_permobj_seg, buffered_addr, j, heap_info->oopmap());
712 } else {
713 relocate_root_at<oop>(requested_permobj_seg, buffered_addr, j, heap_info->oopmap());
714 }
715 }
716 num_permobjs_relocated += length;
717 }
718
719 compute_ptrmap(heap_info);
720
721 size_t total_bytes = (size_t)_buffer->length();
722 log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
723 log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
724 }
725
726 // Put the permobj_segments in the extra space that we have reserved at the end of the HeapShared::roots() array.
727 template <typename T> void ArchiveHeapWriter::add_permobj_segments_to_roots(GrowableArrayCHeap<oop, mtClassShared>* roots,
728 ArchiveHeapInfo* heap_info,
729 GrowableArray<size_t>* permobj_seg_offsets) {
730 for (int i = 0; i < permobj_seg_offsets->length(); i++) {
731 size_t permobj_seg_bottom_offset = permobj_seg_offsets->at(i);
732 oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_offset);
733 oop requested_permobj_seg = requested_obj_from_buffer_offset(permobj_seg_bottom_offset);
734 int permobj_index = roots->length() + i;
735
736 size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(permobj_index);
737 T* addr = (T*)(buffered_heap_roots_addr() + offset);
738 store_requested_oop_in_buffer<T>(addr, requested_permobj_seg);
739 mark_oop_pointer<T>(addr, heap_info->oopmap());
740 }
741 }
742
743 // If buffered_addr is one of the permobj segments, return its index; the segment's byte size and element count are reported through the out parameters. Returns -1 otherwise.
744 int ArchiveHeapWriter::get_permobj_segment_at(address buffered_addr, size_t* byte_size, int* permobj_segment_length) {
745 size_t offset = buffered_addr - buffer_bottom();
746 for (int i = 0; i < _permobj_seg_buffered_addrs->length(); i++) {
747 if (offset == _permobj_seg_buffered_addrs->at(i)) {
748 *byte_size = _permobj_seg_bytesizes->at(i);
749 *permobj_segment_length = _permobj_seg_lengths->at(i);
750 return i;
751 }
752 }
753 return -1;
754 }
755
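// Flattens a (permobj segment, index within segment) pair into a single global
// index and returns the corresponding entry of _source_objs.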
756 oop ArchiveHeapWriter::get_permobj_source_addr(int permobj_segment, int index) {
757 for (int i = 0; i < permobj_segment; i++) {
758 index += _permobj_seg_lengths->at(i);
759 }
760
761 return _source_objs->at(index);
762 }
763
764 void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
765 Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
766 if (ptr != nullptr) {
767 NativePointerInfo info;
768 info._src_obj = src_obj;
769 info._field_offset = field_offset;
770 _native_pointers->append(info);
771 assert(ArchiveBuilder::current()->has_been_archived((address)ptr), "must be archived %p", ptr);
772 HeapShared::set_has_native_pointers(src_obj);
773 _num_native_ptrs ++;
774 }
775 }
776
777 // Do we have a jlong/jint field that's actually a pointer to a MetaspaceObj?
778 bool ArchiveHeapWriter::is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset) {
779 HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
780 assert(p != nullptr, "must be");
781
782 // requested_field_addr = the address of this field in the requested space
783 oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
784 Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
785 assert((Metadata**)_requested_bottom <= requested_field_addr && requested_field_addr < (Metadata**) _requested_top, "range check");
786
787 BitMap::idx_t idx = requested_field_addr - (Metadata**) _requested_bottom;
788 // Leading zeros have been removed, so some addresses may not be in the ptrmap
789 size_t start_pos = FileMapInfo::current_info()->heap_ptrmap_start_pos();
790 if (idx < start_pos) {
791 return false;
808 int field_offset = info._field_offset;
809 HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
810 // requested_field_addr = the address of this field in the requested space
811 oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
812 Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
813 assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");
814
815 // Mark this field in the bitmap
816 BitMap::idx_t idx = requested_field_addr - bottom;
817 heap_info->ptrmap()->set_bit(idx);
818 num_non_null_ptrs ++;
819 max_idx = MAX2(max_idx, idx);
820
821 // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
822 // this address if the RO/RW regions are mapped at the default location).
823
824 Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
825 Metadata* native_ptr = *buffered_field_addr;
826 assert(native_ptr != nullptr, "sanity");
827
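// If this metadata object was replaced by a regenerated version at dump time,
// redirect the pointer to the regenerated copy before translating it.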
828 if (RegeneratedClasses::has_been_regenerated((address)native_ptr)) {
829 native_ptr = (Metadata*)RegeneratedClasses::get_regenerated_object((address)native_ptr);
830 }
831
832 address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
833 address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
834 *buffered_field_addr = (Metadata*)requested_native_ptr;
835 }
836
837 heap_info->ptrmap()->resize(max_idx + 1);
838 log_info(cds, heap)("compute_ptrmap: marked %d non-null native pointers for heap region (" SIZE_FORMAT " bits)",
839 num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
840 }
841
842 #endif // INCLUDE_CDS_JAVA_HEAP