/*
 * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "cds/aotReferenceObjSupport.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/modules.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t ArchiveHeapWriter::_buffer_used;

// Heap root segments
HeapRootSegments ArchiveHeapWriter::_heap_root_segments;

address ArchiveHeapWriter::_requested_bottom;
address ArchiveHeapWriter::_requested_top;

static size_t _num_strings = 0;
static size_t _string_bytes = 0;
static size_t _num_packages = 0;
static size_t _num_protection_domains = 0;

GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
GrowableArrayCHeap<ArchiveHeapWriter::HeapObjOrder, mtClassShared>* ArchiveHeapWriter::_source_objs_order;

ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
  ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;


typedef ResourceHashtable<
    size_t,    // offset of a filler from ArchiveHeapWriter::buffer_bottom()
    size_t,    // size of this filler (in bytes)
    127,       // prime number
    AnyObj::C_HEAP,
    mtClassShared> FillersTable;
static FillersTable* _fillers;
static int _num_native_ptrs = 0;

void ArchiveHeapWriter::init() {
  if (CDSConfig::is_dumping_heap()) {
    Universe::heap()->collect(GCCause::_java_lang_system_gc);

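    // Maps the offset of each buffered copy (from buffer_bottom()) back to its
    // source object in the dump-time heap. Entries are added as objects are
    // copied in copy_source_objs_to_buffer() and queried via
    // buffered_addr_to_source_obj().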
    _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
    _fillers = new FillersTable();
    _requested_bottom = nullptr;
    _requested_top = nullptr;

    _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);

    guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
  }
}

void ArchiveHeapWriter::delete_tables_with_raw_oops() {
  delete _source_objs;
  _source_objs = nullptr;
}

void ArchiveHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                              ArchiveHeapInfo* heap_info) {
  assert(CDSConfig::is_dumping_heap(), "sanity");
  allocate_buffer();
  copy_source_objs_to_buffer(roots);
  set_requested_address(heap_info);
  relocate_embedded_oops(roots, heap_info);
}

bool ArchiveHeapWriter::is_too_large_to_archive(oop o) {
  return is_too_large_to_archive(o->size());
}

bool ArchiveHeapWriter::is_string_too_large_to_archive(oop string) {
  typeArrayOop value = java_lang_String::value_no_keepalive(string);
  return is_too_large_to_archive(value);
}

bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) {
  assert(size > 0, "no zero-size object");
  assert(size * HeapWordSize > size, "no overflow");
  static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");

  size_t byte_size = size * HeapWordSize;
  if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
    return true;
  } else {
    return false;
  }
}

// Various lookup functions between source_obj, buffered_obj and requested_obj
bool ArchiveHeapWriter::is_in_requested_range(oop o) {
  assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
  address a = cast_from_oop<address>(o);
  return (_requested_bottom <= a && a < _requested_top);
}

oop ArchiveHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
  oop req_obj = cast_to_oop(_requested_bottom + offset);
  assert(is_in_requested_range(req_obj), "must be");
  return req_obj;
}

oop ArchiveHeapWriter::source_obj_to_requested_obj(oop src_obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
  if (p != nullptr) {
    return requested_obj_from_buffer_offset(p->buffer_offset());
  } else {
    return nullptr;
  }
}

oop ArchiveHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
  OopHandle* oh = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
  if (oh != nullptr) {
    return oh->resolve();
  } else {
    return nullptr;
  }
}

address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
  return _requested_bottom + buffered_address_to_offset(buffered_addr);
}

address ArchiveHeapWriter::requested_address() {
  assert(_buffer != nullptr, "must be initialized");
  return _requested_bottom;
}

void ArchiveHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

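// Note: _buffer is a GrowableArray and may be reallocated as it grows, so all
// bookkeeping in this file (_buffer_used, the filler table, and
// CachedOopInfo::buffer_offset()) is kept as offsets from buffer_bottom()
// rather than as raw pointers into the buffer.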
void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

objArrayOop ArchiveHeapWriter::allocate_root_segment(size_t offset, int element_count) {
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(offset);
  memset(mem, 0, objArrayOopDesc::object_size(element_count));

  // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, Universe::objectArrayKlass()->prototype_header());
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
  }
  arrayOopDesc::set_length(mem, element_count);
  return objArrayOop(cast_to_oop(mem));
}

void ArchiveHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
  // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
  if (UseCompressedOops) {
    *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
  } else {
    *segment->obj_at_addr<oop>(index) = root;
  }
}

void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // Depending on the number of classes we are archiving, a single roots array may be
  // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
  // allows us to chop the large array into a series of "segments". The current layout
  // starts with zero or more segments that exactly fit MIN_GC_REGION_ALIGNMENT, and ends
  // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
  // This is simple and efficient. We do not need filler objects anywhere between the segments,
  // or immediately after the last segment. This allows starting the object dump immediately
  // after the roots.
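  //
  // Illustrative example (the actual constants are defined elsewhere): assuming
  // a 1 MB MIN_GC_REGION_ALIGNMENT, 4-byte narrowOops and a 16-byte objArray
  // header, max_elem_count would be (1M - 16) / 4 = 262140, so 300000 roots
  // would be chopped into one full 1 MB segment plus a final segment of
  // 37860 elements.
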
  assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
         "Pre-condition: Roots start at aligned boundary: %zu", _buffer_used);

  int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
  assert(objArrayOopDesc::object_size(max_elem_count)*HeapWordSize == MIN_GC_REGION_ALIGNMENT,
         "Should match exactly");

  HeapRootSegments segments(_buffer_used,
                            roots->length(),
                            MIN_GC_REGION_ALIGNMENT,
                            max_elem_count);

  int root_index = 0;
  for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
    int size_elems = segments.size_in_elems(seg_idx);
    size_t size_bytes = segments.size_in_bytes(seg_idx);

    size_t oop_offset = _buffer_used;
    _buffer_used = oop_offset + size_bytes;
    ensure_buffer_space(_buffer_used);

    assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
           "Roots segment %zu start is not aligned: %zu",
           seg_idx, oop_offset);

    objArrayOop seg_oop = allocate_root_segment(oop_offset, size_elems);
    for (int i = 0; i < size_elems; i++) {
      root_segment_at_put(seg_oop, i, roots->at(root_index++));
    }

    log_info(aot, heap)("archived obj root segment [%d] = %zu bytes, obj = " PTR_FORMAT,
                        size_elems, size_bytes, p2i(seg_oop));
  }

  assert(root_index == roots->length(), "Post-condition: All roots are handled");

  _heap_root_segments = segments;
}

// The goal is to sort the objects in increasing order of:
// - objects that have only oop pointers
// - objects that have both native and oop pointers
// - objects that have only native pointers
// - objects that have no pointers
static int oop_sorting_rank(oop o) {
  bool has_oop_ptr, has_native_ptr;
  HeapShared::get_pointer_info(o, has_oop_ptr, has_native_ptr);

  if (has_oop_ptr) {
    if (!has_native_ptr) {
      return 0;
    } else {
      return 1;
    }
  } else {
    if (has_native_ptr) {
      return 2;
    } else {
      return 3;
    }
  }
}

int ArchiveHeapWriter::compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b) {
  int rank_a = a->_rank;
  int rank_b = b->_rank;

  if (rank_a != rank_b) {
    return rank_a - rank_b;
  } else {
    // If they are the same rank, sort them by their position in the _source_objs array
    return a->_index - b->_index;
  }
}

void ArchiveHeapWriter::sort_source_objs() {
  log_info(aot)("sorting heap objects");
  int len = _source_objs->length();
  _source_objs_order = new GrowableArrayCHeap<HeapObjOrder, mtClassShared>(len);

  for (int i = 0; i < len; i++) {
    oop o = _source_objs->at(i);
    int rank = oop_sorting_rank(o);
    HeapObjOrder os = {i, rank};
    _source_objs_order->append(os);
  }
  log_info(aot)("computed ranks");
  _source_objs_order->sort(compare_objs_by_oop_fields);
  log_info(aot)("sorting heap objects done");
}

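// Note: because objects with oop pointers sort first and pointer-free objects
// sort last, the set bits in the oopmap built by relocate_embedded_oops() tend
// to cluster at the start of the region (see log_bitmap_usage()).
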
void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // There could be multiple root segments, which we want to be aligned by region.
  // Putting them ahead of objects makes sure we waste no space.
  copy_roots_to_buffer(roots);

  sort_source_objs();
  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);
    assert(buffer_offset <= 0x7fffffff, "sanity");
    HeapShared::add_to_permanent_oop_table(src_obj, (int)buffer_offset);

    OopHandle handle(Universe::vm_global(), src_obj);
    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
    _buffer_offset_to_source_obj_table->maybe_grow();

    if (java_lang_Module::is_instance(src_obj)) {
      Modules::check_archived_module_oop(src_obj);
    }
  }

  log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
                _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
  log_info(cds)("    strings            = %8zu (%zu bytes)", _num_strings, _string_bytes);
  log_info(cds)("    packages           = %8zu", _num_packages);
  log_info(cds)("    protection domains = %8zu", _num_protection_domains);
}

size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}

int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length --) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
  return mem;
}

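// Illustrative example (sizes are hypothetical): if _buffer_used is 0xff000
// within a 1 MB (0x100000) region and the next object needs 0x2000 bytes, the
// copy would straddle the region boundary, so a filler objArray is synthesized
// to pad the buffer out to 0x100000 before the object is copied.
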
void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space of min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
    // we can map the region in any region-based collector.
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
           "no buffered object can be larger than %d bytes", MIN_GC_REGION_ALIGNMENT);

    const size_t filler_end = next_min_region_bottom;
    const size_t fill_bytes = filler_end - _buffer_used;
    assert(fill_bytes > 0, "must be");
    ensure_buffer_space(filler_end);

    int array_length = filler_array_length(fill_bytes);
    log_info(aot, heap)("Inserting filler obj array of %d elements (%zu bytes total) @ buffer offset %zu",
                        array_length, fill_bytes, _buffer_used);
    HeapWord* filler = init_filler_array_at_buffer_top(array_length, fill_bytes);
    _buffer_used = filler_end;
    _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
  }
}

size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

void ArchiveHeapWriter::update_stats(oop src_obj) {
  if (java_lang_String::is_instance(src_obj)) {
    _num_strings ++;
    _string_bytes += src_obj->size() * HeapWordSize;
    _string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
  } else {
    Klass* k = src_obj->klass();
    Symbol* name = k->name();
    if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
      _num_packages ++;
    } else if (name->equals("java/security/ProtectionDomain")) {
      _num_protection_domains ++;
    }
  }
}

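// Copies one source object into the buffer (padding the current GC region
// first if the copy would straddle a MIN_GC_REGION_ALIGNMENT boundary) and
// returns the offset of the copy from buffer_bottom().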
size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  update_stats(src_obj);

  assert(!is_too_large_to_archive(src_obj), "already checked");
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that we don't have an object that can possibly
  // span across two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, byte_size);

  // These native pointers will be restored explicitly at run time.
  if (java_lang_Module::is_instance(src_obj)) {
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

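// Three address spaces are in play for every archived object:
// - source:    the live object in the dump-time Java heap;
// - buffered:  its copy inside _buffer, at some offset from buffer_bottom();
// - requested: the address we assume the copy will have at run time. If the
//   archived region cannot be mapped there, the runtime patches the contents
//   using the oopmap/ptrmap bitmaps computed below.
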
void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) {
  assert(!info->is_used(), "only set once");

  size_t heap_region_byte_size = _buffer_used;
  assert(heap_region_byte_size > 0, "must archive at least one object!");

  if (UseCompressedOops) {
    if (UseG1GC) {
      address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
      log_info(aot, heap)("Heap end = %p", heap_end);
      _requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
      _requested_bottom = align_down(_requested_bottom, MIN_GC_REGION_ALIGNMENT);
      assert(is_aligned(_requested_bottom, G1HeapRegion::GrainBytes), "sanity");
    } else {
      _requested_bottom = align_up(CompressedOops::begin(), MIN_GC_REGION_ALIGNMENT);
    }
  } else {
    // We always write the objects as if the heap started at this address. This
    // makes the contents of the archive heap deterministic.
    //
    // Note that at runtime, the heap address is selected by the OS, so the archive
    // heap will not be mapped at 0x10000000, and the contents need to be patched.
    _requested_bottom = align_up((address)NOCOOPS_REQUESTED_BASE, MIN_GC_REGION_ALIGNMENT);
  }

  assert(is_aligned(_requested_bottom, MIN_GC_REGION_ALIGNMENT), "sanity");

  _requested_top = _requested_bottom + _buffer_used;

  info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
                                    offset_to_buffered_address<HeapWord*>(_buffer_used)));
  info->set_heap_root_segments(_heap_root_segments);
}

// Oop relocation

template <typename T> T* ArchiveHeapWriter::requested_addr_to_buffered_addr(T* p) {
  assert(is_in_requested_range(cast_to_oop(p)), "must be");

  address addr = address(p);
  assert(addr >= _requested_bottom, "must be");
  size_t offset = addr - _requested_bottom;
  return offset_to_buffered_address<T*>(offset);
}

template <typename T> oop ArchiveHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
  oop o = load_oop_from_buffer(buffered_addr);
  assert(!in_buffer(cast_from_oop<address>(o)), "must point to source oop");
  return o;
}

template <typename T> void ArchiveHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
                                                                            oop request_oop) {
  assert(is_in_requested_range(request_oop), "must be");
  store_oop_in_buffer(buffered_addr, request_oop);
}

inline void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
  *buffered_addr = requested_obj;
}

inline void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
  narrowOop val = CompressedOops::encode_not_null(requested_obj);
  *buffered_addr = val;
}

oop ArchiveHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
  return *buffered_addr;
}

oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
  return CompressedOops::decode(*buffered_addr);
}

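// Rewrites one embedded oop field in the buffer: load the source referent,
// substitute the scratch mirror for real mirrors, store the referent's
// requested address, and record the field's position in the oopmap.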
template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap) {
  oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
  if (source_referent != nullptr) {
    if (java_lang_Class::is_instance(source_referent)) {
      // When the source object points to a "real" mirror, the buffered object should point
      // to the "scratch" mirror, which has all unarchivable fields scrubbed (to be reinstated
      // at run time).
      source_referent = HeapShared::scratch_java_mirror(source_referent);
      assert(source_referent != nullptr, "must be");
    }
    oop request_referent = source_obj_to_requested_obj(source_referent);
    store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
    mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
  }
}

template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
  address requested_region_bottom;

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p < (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (UseCompactObjectHeaders) {
    fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk));
  } else {
    fake_oop->set_narrow_klass(nk);
  }

  if (src_obj == nullptr) {
    return;
  }
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!src_obj->fast_no_hash_check()) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
  // Strip age bits.
  fake_oop->set_mark(fake_oop->mark().set_age(0));
}

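// Closure that visits every oop field of a source object and updates the
// corresponding field of its buffered copy to the requested address; fields of
// java.lang.ref.Reference objects that must not be archived are nulled out
// instead (see AOTReferenceObjSupport::skip_field()).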
class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;
  bool _is_java_lang_ref;
public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
    _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap)
  {
    _is_java_lang_ref = AOTReferenceObjSupport::check_if_ref_obj(src_obj);
  }

  void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }

private:
  template <class T> void do_oop_work(T *p) {
    int field_offset = pointer_delta_as_int((char*)p, cast_from_oop<char*>(_src_obj));
    T* field_addr = (T*)(_buffered_obj + field_offset);
    if (_is_java_lang_ref && AOTReferenceObjSupport::skip_field(field_offset)) {
      // Do not copy these fields. Set them to null.
      *field_addr = (T)0x0;
    } else {
      ArchiveHeapWriter::relocate_field_in_buffer<T>(field_addr, _oopmap);
    }
  }
};

static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
  // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
  size_t start = bitmap->find_first_set_bit(0);
  size_t end = bitmap->size();
  log_info(aot)("%s = %7zu ... %7zu (%3zu%% ... %3zu%% = %3zu%%)", which,
                start, end,
                start * 100 / total_bits,
                end   * 100 / total_bits,
                (end - start) * 100 / total_bits);
}

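// Writing proceeds in two passes: copy_source_objs_to_buffer() has already
// copied the raw object bits; the pass below rewrites each buffered object's
// header and embedded oop fields for the requested address range.
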
// Update all oop fields embedded in the buffered objects
void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                               ArchiveHeapInfo* heap_info) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);

  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
    assert(info != nullptr, "must be");
    oop requested_obj = requested_obj_from_buffer_offset(info->buffer_offset());
    update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
    address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
    EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
    src_obj->oop_iterate(&relocator);
  }

  // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
  // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
  for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
    size_t seg_offset = _heap_root_segments.segment_offset(seg_idx);

    objArrayOop requested_obj = (objArrayOop)requested_obj_from_buffer_offset(seg_offset);
    update_header_for_requested_obj(requested_obj, nullptr, Universe::objectArrayKlass());
    address buffered_obj = offset_to_buffered_address<address>(seg_offset);
    int length = _heap_root_segments.size_in_elems(seg_idx);

    if (UseCompressedOops) {
      for (int i = 0; i < length; i++) {
        narrowOop* addr = (narrowOop*)(buffered_obj + objArrayOopDesc::obj_at_offset<narrowOop>(i));
        relocate_field_in_buffer<narrowOop>(addr, heap_info->oopmap());
      }
    } else {
      for (int i = 0; i < length; i++) {
        oop* addr = (oop*)(buffered_obj + objArrayOopDesc::obj_at_offset<oop>(i));
        relocate_field_in_buffer<oop>(addr, heap_info->oopmap());
      }
    }
  }

  compute_ptrmap(heap_info);

  size_t total_bytes = (size_t)_buffer->length();
  log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
  log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
}

void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
  Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
  if (ptr != nullptr) {
    NativePointerInfo info;
    info._src_obj = src_obj;
    info._field_offset = field_offset;
    _native_pointers->append(info);
    HeapShared::set_has_native_pointers(src_obj);
    _num_native_ptrs ++;
  }
}

// Do we have a jlong/jint field that's actually a pointer to a MetaspaceObj?
bool ArchiveHeapWriter::is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, address buffered_obj, int field_offset) {
  size_t offset = buffered_address_to_offset(buffered_obj) + checked_cast<size_t>(field_offset); // in bytes
  BitMap::idx_t idx = checked_cast<BitMap::idx_t>(offset) / HeapWordSize;
  // Leading zeros have been removed so some addresses may not be in the ptrmap
  size_t start_pos = FileMapInfo::current_info()->heap_ptrmap_start_pos();
  if (idx < start_pos) {
    return false;
  } else {
    idx -= start_pos;
  }
  return (idx < heap_info->ptrmap()->size()) && (heap_info->ptrmap()->at(idx) == true);
}

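// The ptrmap has one bit per pointer-sized word of the requested region; a set
// bit means the corresponding word holds a native pointer (Metadata*) that may
// need to be relocated at run time.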
void ArchiveHeapWriter::compute_ptrmap(ArchiveHeapInfo* heap_info) {
  int num_non_null_ptrs = 0;
  Metadata** bottom = (Metadata**) _requested_bottom;
  Metadata** top = (Metadata**) _requested_top; // exclusive
  heap_info->ptrmap()->resize(top - bottom);

  BitMap::idx_t max_idx = 32; // paranoid - don't make it too small
  for (int i = 0; i < _native_pointers->length(); i++) {
    NativePointerInfo info = _native_pointers->at(i);
    oop src_obj = info._src_obj;
    int field_offset = info._field_offset;
    HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
    // requested_field_addr = the address of this field in the requested space
    oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
    Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
    assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");

    // Mark this field in the bitmap
    BitMap::idx_t idx = requested_field_addr - bottom;
    heap_info->ptrmap()->set_bit(idx);
    num_non_null_ptrs ++;
    max_idx = MAX2(max_idx, idx);

    // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
    // this address if the RO/RW regions are mapped at the default location).

    Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
    Metadata* native_ptr = *buffered_field_addr;
    guarantee(native_ptr != nullptr, "sanity");
    guarantee(ArchiveBuilder::current()->has_been_buffered((address)native_ptr),
              "Metadata %p should have been archived", native_ptr);

    if (RegeneratedClasses::has_been_regenerated((address)native_ptr)) {
      native_ptr = (Metadata*)RegeneratedClasses::get_regenerated_object((address)native_ptr);
    }

    address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
    address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
    *buffered_field_addr = (Metadata*)requested_native_ptr;
  }

  heap_info->ptrmap()->resize(max_idx + 1);
  log_info(aot, heap)("compute_ptrmap: marked %d non-null native pointers for heap region (%zu bits)",
                      num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}

#endif // INCLUDE_CDS_JAVA_HEAP