1 /*
2 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/archiveHeapWriter.hpp"
27 #include "cds/cdsConfig.hpp"
28 #include "cds/filemap.hpp"
29 #include "cds/heapShared.hpp"
30 #include "classfile/systemDictionary.hpp"
31 #include "gc/shared/collectedHeap.hpp"
32 #include "memory/iterator.inline.hpp"
33 #include "memory/oopFactory.hpp"
34 #include "memory/universe.hpp"
35 #include "oops/compressedOops.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "oops/objArrayOop.inline.hpp"
38 #include "oops/oopHandle.inline.hpp"
39 #include "oops/typeArrayKlass.hpp"
40 #include "oops/typeArrayOop.hpp"
41 #include "runtime/java.hpp"
42 #include "runtime/mutexLocker.hpp"
43 #include "utilities/bitMap.inline.hpp"
44 #if INCLUDE_G1GC
45 #include "gc/g1/g1CollectedHeap.hpp"
46 #include "gc/g1/g1HeapRegion.hpp"
47 #endif
48
49 #if INCLUDE_CDS_JAVA_HEAP
50
51 GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;
52
53 // The following are offsets from buffer_bottom()
54 size_t ArchiveHeapWriter::_buffer_used;
55
56 // Heap root segments
57 HeapRootSegments ArchiveHeapWriter::_heap_root_segments;
58
59 address ArchiveHeapWriter::_requested_bottom;
60 address ArchiveHeapWriter::_requested_top;
61
62 GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
63 GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
64 GrowableArrayCHeap<ArchiveHeapWriter::HeapObjOrder, mtClassShared>* ArchiveHeapWriter::_source_objs_order;
65
66 ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
67 ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
68
69
70 typedef ResourceHashtable<
71 size_t, // offset of a filler from ArchiveHeapWriter::buffer_bottom()
72 size_t, // size of this filler (in bytes)
73 127, // prime number
74 AnyObj::C_HEAP,
75 mtClassShared> FillersTable;
76 static FillersTable* _fillers;
77 static int _num_native_ptrs = 0;
78
79 void ArchiveHeapWriter::init() {
80 if (HeapShared::can_write()) {
81 Universe::heap()->collect(GCCause::_java_lang_system_gc);
298 _source_objs_order->append(os);
299 }
300 log_info(cds)("computed ranks");
301 _source_objs_order->sort(compare_objs_by_oop_fields);
302 log_info(cds)("sorting heap objects done");
303 }
304
305 void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
306   // There could be multiple root segments, and we want them to be region-aligned.
307   // Copying them into the buffer ahead of the other objects ensures we waste no space.
308 copy_roots_to_buffer(roots);
309
310 sort_source_objs();
311 for (int i = 0; i < _source_objs_order->length(); i++) {
312 int src_obj_index = _source_objs_order->at(i)._index;
313 oop src_obj = _source_objs->at(src_obj_index);
314 HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
315 assert(info != nullptr, "must be");
316 size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
317 info->set_buffer_offset(buffer_offset);
318
319 _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, src_obj);
320 _buffer_offset_to_source_obj_table->maybe_grow();
321 }
322
323 log_info(cds)("Size of heap region = " SIZE_FORMAT " bytes, %d objects, %d roots, %d native ptrs",
324 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
325 }
326
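// Unused space at the end of a minimal GC region is padded with a dummy object array
// (a "filler") so that the archived range still parses as a sequence of valid heap objects.
// filler_array_byte_size() returns the heap size, in bytes, of such an array of the given
// length; filler_array_length() searches for the length whose size is exactly fill_bytes.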
327 size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
328 size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
329 return byte_size;
330 }
331
332 int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
333 assert(is_object_aligned(fill_bytes), "must be");
334 size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
335
336 int initial_length = to_array_length(fill_bytes / elemSize);
337 for (int length = initial_length; length >= 0; length --) {
338 size_t array_byte_size = filler_array_byte_size(length);
339 if (array_byte_size == fill_bytes) {
340 return length;
341 }
342 }
343
344 ShouldNotReachHere();
389 _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
390 }
391 }
392
393 size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
394 size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
395 if (p != nullptr) {
396 assert(*p > 0, "filler must be larger than zero bytes");
397 return *p;
398 } else {
399 return 0; // buffered_addr is not a filler
400 }
401 }
402
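// Store a primitive value of type T into a field of an object that has already been
// copied into the output buffer.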
403 template <typename T>
404 void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
405 T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
406 *field_addr = value;
407 }
408
409 size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
410 assert(!is_too_large_to_archive(src_obj), "already checked");
411 size_t byte_size = src_obj->size() * HeapWordSize;
412 assert(byte_size > 0, "no zero-size objects");
413
414 // For region-based collectors such as G1, the archive heap may be mapped into
415 // multiple regions. We need to make sure that we don't have an object that could
416 // span across two regions.
417 maybe_fill_gc_region_gap(byte_size);
418
419 size_t new_used = _buffer_used + byte_size;
420 assert(new_used > _buffer_used, "no wrap around");
421
422 size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
423 size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
424 assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
425
426 ensure_buffer_space(new_used);
427
428 address from = cast_from_oop<address>(src_obj);
429 address to = offset_to_buffered_address<address>(_buffer_used);
510
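// Store/load an oop in the buffer, with overloads for the uncompressed (oop*) and
// compressed (narrowOop*) heap layouts.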
511 inline void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
512 *buffered_addr = requested_obj;
513 }
514
515 inline void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
516 narrowOop val = CompressedOops::encode_not_null(requested_obj);
517 *buffered_addr = val;
518 }
519
520 oop ArchiveHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
521 return *buffered_addr;
522 }
523
524 oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
525 return CompressedOops::decode(*buffered_addr);
526 }
527
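// Rewrite an oop field that has been copied into the buffer: replace the source
// referent with its "requested" (archive run-time) address and record the field's
// position in the oopmap.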
528 template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap) {
529 oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
530 if (!CompressedOops::is_null(source_referent)) {
531 oop request_referent = source_obj_to_requested_obj(source_referent);
532 store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
533 mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
534 }
535 }
536
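// The oopmap has one bit per potential oop slot (narrowOop- or oop-sized) in the
// requested region; set the bit that corresponds to this field.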
537 template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
538 T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
539 address requested_region_bottom;
540
541 assert(request_p >= (T*)_requested_bottom, "sanity");
542 assert(request_p < (T*)_requested_top, "sanity");
543 requested_region_bottom = _requested_bottom;
544
545 // Mark the pointer in the oopmap
546 T* region_bottom = (T*)requested_region_bottom;
547 assert(request_p >= region_bottom, "must be");
548 BitMap::idx_t idx = request_p - region_bottom;
549 assert(idx < oopmap->size(), "overflow");
550 oopmap->set_bit(idx);
639 oop* addr = (oop*)(buffered_obj + objArrayOopDesc::obj_at_offset<oop>(i));
640 relocate_field_in_buffer<oop>(addr, heap_info->oopmap());
641 }
642 }
643 }
644
645 compute_ptrmap(heap_info);
646
647 size_t total_bytes = (size_t)_buffer->length();
648 log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
649 log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
650 }
651
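// Remember that src_obj has a non-null Metadata* field at field_offset.
// compute_ptrmap() later relocates these fields and records them in the ptrmap.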
652 void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
653 Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
654 if (ptr != nullptr) {
655 NativePointerInfo info;
656 info._src_obj = src_obj;
657 info._field_offset = field_offset;
658 _native_pointers->append(info);
659 HeapShared::set_has_native_pointers(src_obj);
660 _num_native_ptrs ++;
661 }
662 }
663
664 // Do we have a jlong/jint field that's actually a pointer to a MetaspaceObj?
665 bool ArchiveHeapWriter::is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset) {
666 HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
667 assert(p != nullptr, "must be");
668
669 // requested_field_addr = the address of this field in the requested space
670 oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
671 Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
672 assert((Metadata**)_requested_bottom <= requested_field_addr && requested_field_addr < (Metadata**) _requested_top, "range check");
673
674 BitMap::idx_t idx = requested_field_addr - (Metadata**) _requested_bottom;
675 // Leading zeros have been removed so some addresses may not be in the ptrmap
676 size_t start_pos = FileMapInfo::current_info()->heap_ptrmap_start_pos();
677 if (idx < start_pos) {
678 return false;
695 int field_offset = info._field_offset;
696 HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
697 // requested_field_addr = the address of this field in the requested space
698 oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
699 Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
700 assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");
701
702 // Mark this field in the bitmap
703 BitMap::idx_t idx = requested_field_addr - bottom;
704 heap_info->ptrmap()->set_bit(idx);
705 num_non_null_ptrs ++;
706 max_idx = MAX2(max_idx, idx);
707
708 // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
709 // this address if the RO/RW regions are mapped at the default location).
710
711 Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
712 Metadata* native_ptr = *buffered_field_addr;
713 assert(native_ptr != nullptr, "sanity");
714
715 address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
716 address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
717 *buffered_field_addr = (Metadata*)requested_native_ptr;
718 }
719
720 heap_info->ptrmap()->resize(max_idx + 1);
721 log_info(cds, heap)("calculate_ptrmap: marked %d non-null native pointers for heap region (" SIZE_FORMAT " bits)",
722 num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
723 }
724
725 #endif // INCLUDE_CDS_JAVA_HEAP
1 /*
2 * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "cds/archiveHeapWriter.hpp"
27 #include "cds/cdsConfig.hpp"
28 #include "cds/filemap.hpp"
29 #include "cds/heapShared.hpp"
30 #include "cds/regeneratedClasses.hpp"
31 #include "classfile/javaClasses.hpp"
32 #include "classfile/systemDictionary.hpp"
33 #include "gc/shared/collectedHeap.hpp"
34 #include "memory/iterator.inline.hpp"
35 #include "memory/oopFactory.hpp"
36 #include "memory/universe.hpp"
37 #include "oops/compressedOops.hpp"
38 #include "oops/oop.inline.hpp"
39 #include "oops/objArrayOop.inline.hpp"
40 #include "oops/oopHandle.inline.hpp"
41 #include "oops/typeArrayKlass.hpp"
42 #include "oops/typeArrayOop.hpp"
43 #include "runtime/java.hpp"
44 #include "runtime/mutexLocker.hpp"
45 #include "utilities/bitMap.inline.hpp"
46 #if INCLUDE_G1GC
47 #include "gc/g1/g1CollectedHeap.hpp"
48 #include "gc/g1/g1HeapRegion.hpp"
49 #endif
50
51 #if INCLUDE_CDS_JAVA_HEAP
52
53 GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;
54
55 // The following are offsets from buffer_bottom()
56 size_t ArchiveHeapWriter::_buffer_used;
57
58 // Heap root segments
59 HeapRootSegments ArchiveHeapWriter::_heap_root_segments;
60
61 address ArchiveHeapWriter::_requested_bottom;
62 address ArchiveHeapWriter::_requested_top;
63
64 static size_t _num_strings = 0;
65 static size_t _string_bytes = 0;
66 static size_t _num_packages = 0;
67 static size_t _num_protection_domains = 0;
68
69 GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
70 GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
71 GrowableArrayCHeap<ArchiveHeapWriter::HeapObjOrder, mtClassShared>* ArchiveHeapWriter::_source_objs_order;
72
73 ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
74 ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
75
76
77 typedef ResourceHashtable<
78 size_t, // offset of a filler from ArchiveHeapWriter::buffer_bottom()
79 size_t, // size of this filler (in bytes)
80 127, // prime number
81 AnyObj::C_HEAP,
82 mtClassShared> FillersTable;
83 static FillersTable* _fillers;
84 static int _num_native_ptrs = 0;
85
86 void ArchiveHeapWriter::init() {
87 if (HeapShared::can_write()) {
88 Universe::heap()->collect(GCCause::_java_lang_system_gc);
305 _source_objs_order->append(os);
306 }
307 log_info(cds)("computed ranks");
308 _source_objs_order->sort(compare_objs_by_oop_fields);
309 log_info(cds)("sorting heap objects done");
310 }
311
312 void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
313 // There could be multiple root segments, and we want them to be region-aligned.
314 // Copying them into the buffer ahead of the other objects ensures we waste no space.
315 copy_roots_to_buffer(roots);
316
317 sort_source_objs();
318 for (int i = 0; i < _source_objs_order->length(); i++) {
319 int src_obj_index = _source_objs_order->at(i)._index;
320 oop src_obj = _source_objs->at(src_obj_index);
321 HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
322 assert(info != nullptr, "must be");
323 size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
324 info->set_buffer_offset(buffer_offset);
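// Also record the object and its buffer offset in the permanent oop table;
// the offset must fit in an int.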
325 assert(buffer_offset <= 0x7fffffff, "sanity");
326 HeapShared::add_to_permanent_oop_table(src_obj, (int)buffer_offset);
327
328 _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, src_obj);
329 _buffer_offset_to_source_obj_table->maybe_grow();
330 }
331
332 log_info(cds)("Size of heap region = " SIZE_FORMAT " bytes, %d objects, %d roots, %d native ptrs",
333 _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
334 log_info(cds)(" strings = " SIZE_FORMAT_W(8) " (" SIZE_FORMAT " bytes)", _num_strings, _string_bytes);
335 log_info(cds)(" packages = " SIZE_FORMAT_W(8), _num_packages);
336 log_info(cds)("    protection domains = " SIZE_FORMAT_W(8), _num_protection_domains);
337 }
338
339 size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
340 size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
341 return byte_size;
342 }
343
344 int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
345 assert(is_object_aligned(fill_bytes), "must be");
346 size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
347
348 int initial_length = to_array_length(fill_bytes / elemSize);
349 for (int length = initial_length; length >= 0; length --) {
350 size_t array_byte_size = filler_array_byte_size(length);
351 if (array_byte_size == fill_bytes) {
352 return length;
353 }
354 }
355
356 ShouldNotReachHere();
401 _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
402 }
403 }
404
405 size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
406 size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
407 if (p != nullptr) {
408 assert(*p > 0, "filler must be larger than zero bytes");
409 return *p;
410 } else {
411 return 0; // buffered_addr is not a filler
412 }
413 }
414
415 template <typename T>
416 void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
417 T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
418 *field_addr = value;
419 }
420
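// Tally the archived strings (including their backing value arrays), packages, and
// protection domains; the totals are logged at the end of copy_source_objs_to_buffer().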
421 void ArchiveHeapWriter::update_stats(oop src_obj) {
422 if (java_lang_String::is_instance(src_obj)) {
423 _num_strings ++;
424 _string_bytes += src_obj->size() * HeapWordSize;
425 _string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
426 } else {
427 Klass* k = src_obj->klass();
428 Symbol* name = k->name();
429 if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
430 _num_packages ++;
431 } else if (name->equals("java/security/ProtectionDomain")) {
432 _num_protection_domains ++;
433 }
434 }
435 }
436
437 size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
438 update_stats(src_obj);
439
440 assert(!is_too_large_to_archive(src_obj), "already checked");
441 size_t byte_size = src_obj->size() * HeapWordSize;
442 assert(byte_size > 0, "no zero-size objects");
443
444 // For region-based collectors such as G1, the archive heap may be mapped into
445 // multiple regions. We need to make sure that we don't have an object that could
446 // span across two regions.
447 maybe_fill_gc_region_gap(byte_size);
448
449 size_t new_used = _buffer_used + byte_size;
450 assert(new_used > _buffer_used, "no wrap around");
451
452 size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
453 size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
454 assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
455
456 ensure_buffer_space(new_used);
457
458 address from = cast_from_oop<address>(src_obj);
459 address to = offset_to_buffered_address<address>(_buffer_used);
540
541 inline void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
542 *buffered_addr = requested_obj;
543 }
544
545 inline void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
546 narrowOop val = CompressedOops::encode_not_null(requested_obj);
547 *buffered_addr = val;
548 }
549
550 oop ArchiveHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
551 return *buffered_addr;
552 }
553
554 oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
555 return CompressedOops::decode(*buffered_addr);
556 }
557
558 template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap) {
559 oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
560 if (source_referent != nullptr) {
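// For a java.lang.Class referent, archive the scratch mirror in place of the
// original mirror.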
561 if (java_lang_Class::is_instance(source_referent)) {
562 source_referent = HeapShared::scratch_java_mirror(source_referent);
563 assert(source_referent != nullptr, "must be");
564 }
565 oop request_referent = source_obj_to_requested_obj(source_referent);
566 store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
567 mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
568 }
569 }
570
571 template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
572 T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
573 address requested_region_bottom;
574
575 assert(request_p >= (T*)_requested_bottom, "sanity");
576 assert(request_p < (T*)_requested_top, "sanity");
577 requested_region_bottom = _requested_bottom;
578
579 // Mark the pointer in the oopmap
580 T* region_bottom = (T*)requested_region_bottom;
581 assert(request_p >= region_bottom, "must be");
582 BitMap::idx_t idx = request_p - region_bottom;
583 assert(idx < oopmap->size(), "overflow");
584 oopmap->set_bit(idx);
673 oop* addr = (oop*)(buffered_obj + objArrayOopDesc::obj_at_offset<oop>(i));
674 relocate_field_in_buffer<oop>(addr, heap_info->oopmap());
675 }
676 }
677 }
678
679 compute_ptrmap(heap_info);
680
681 size_t total_bytes = (size_t)_buffer->length();
682 log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
683 log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
684 }
685
686 void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
687 Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
688 if (ptr != nullptr) {
689 NativePointerInfo info;
690 info._src_obj = src_obj;
691 info._field_offset = field_offset;
692 _native_pointers->append(info);
693 if (!ArchiveBuilder::current()->has_been_archived((address)ptr)) {
694 // Currently we support marking of only Method and Klass, both of which are
695 // subtypes of Metadata.
696 ResourceMark rm;
697 log_error(cds, heap)("Native pointer %p is not archived", ptr);
698 if (((Metadata*)ptr)->is_method()) {
699 log_error(cds, heap)("Method: %s", ((Method*)ptr)->external_name());
700 } else {
701 assert(((Metadata*)ptr)->is_klass(), "must be");
702 log_error(cds, heap)("Klass: %s", ((Klass*)ptr)->external_name());
703 }
704 HeapShared::exit_on_error();
705 }
706 HeapShared::set_has_native_pointers(src_obj);
707 _num_native_ptrs ++;
708 }
709 }
710
711 // Do we have a jlong/jint field that's actually a pointer to a MetaspaceObj?
712 bool ArchiveHeapWriter::is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset) {
713 HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
714 assert(p != nullptr, "must be");
715
716 // requested_field_addr = the address of this field in the requested space
717 oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
718 Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
719 assert((Metadata**)_requested_bottom <= requested_field_addr && requested_field_addr < (Metadata**) _requested_top, "range check");
720
721 BitMap::idx_t idx = requested_field_addr - (Metadata**) _requested_bottom;
722 // Leading zeros have been removed so some addresses may not be in the ptrmap
723 size_t start_pos = FileMapInfo::current_info()->heap_ptrmap_start_pos();
724 if (idx < start_pos) {
725 return false;
742 int field_offset = info._field_offset;
743 HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
744 // requested_field_addr = the address of this field in the requested space
745 oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
746 Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
747 assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");
748
749 // Mark this field in the bitmap
750 BitMap::idx_t idx = requested_field_addr - bottom;
751 heap_info->ptrmap()->set_bit(idx);
752 num_non_null_ptrs ++;
753 max_idx = MAX2(max_idx, idx);
754
755 // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
756 // this address if the RO/RW regions are mapped at the default location).
757
758 Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
759 Metadata* native_ptr = *buffered_field_addr;
760 assert(native_ptr != nullptr, "sanity");
761
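// If this metadata object has been regenerated (see regeneratedClasses.hpp), point to
// the regenerated copy instead of the original.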
762 if (RegeneratedClasses::has_been_regenerated((address)native_ptr)) {
763 native_ptr = (Metadata*)RegeneratedClasses::get_regenerated_object((address)native_ptr);
764 }
765
766 address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
767 address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
768 *buffered_field_addr = (Metadata*)requested_native_ptr;
769 }
770
771 heap_info->ptrmap()->resize(max_idx + 1);
772 log_info(cds, heap)("calculate_ptrmap: marked %d non-null native pointers for heap region (" SIZE_FORMAT " bits)",
773 num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
774 }
775
776 #endif // INCLUDE_CDS_JAVA_HEAP