/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t ArchiveHeapWriter::_buffer_used;
size_t ArchiveHeapWriter::_heap_roots_offset;

size_t ArchiveHeapWriter::_heap_roots_word_size;

address ArchiveHeapWriter::_requested_bottom;
address ArchiveHeapWriter::_requested_top;

GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
GrowableArrayCHeap<ArchiveHeapWriter::HeapObjOrder, mtClassShared>* ArchiveHeapWriter::_source_objs_order;

ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
  ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;


typedef ResourceHashtable<address, size_t,
                          127, // prime number
                          AnyObj::C_HEAP,
                          mtClassShared> FillersTable;
static FillersTable* _fillers;
static int _num_native_ptrs = 0;

void ArchiveHeapWriter::init() {
  if (HeapShared::can_write()) {
    Universe::heap()->collect(GCCause::_java_lang_system_gc);

    _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
    _fillers = new FillersTable();
    _requested_bottom = nullptr;
    _requested_top = nullptr;

    _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);

    guarantee(UseG1GC, "implementation limitation");
    guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
  }
}

void ArchiveHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                              ArchiveHeapInfo* heap_info) {
  assert(HeapShared::can_write(), "sanity");
  allocate_buffer();
  copy_source_objs_to_buffer(roots);
  set_requested_address(heap_info);
  relocate_embedded_oops(roots, heap_info);
}

bool ArchiveHeapWriter::is_too_large_to_archive(oop o) {
  return is_too_large_to_archive(o->size());
}

bool ArchiveHeapWriter::is_string_too_large_to_archive(oop string) {
  typeArrayOop value = java_lang_String::value_no_keepalive(string);
  return is_too_large_to_archive(value);
}

bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) {
  assert(size > 0, "no zero-size object");
  assert(size * HeapWordSize > size, "no overflow");
  static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");

  size_t byte_size = size * HeapWordSize;
  if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
    return true;
  } else {
    return false;
  }
}

// Various lookup functions between source_obj, buffered_obj and requested_obj
bool ArchiveHeapWriter::is_in_requested_range(oop o) {
  assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
  address a = cast_from_oop<address>(o);
  return (_requested_bottom <= a && a < _requested_top);
}

oop ArchiveHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
  oop req_obj = cast_to_oop(_requested_bottom + offset);
  assert(is_in_requested_range(req_obj), "must be");
  return req_obj;
}

oop ArchiveHeapWriter::source_obj_to_requested_obj(oop src_obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
  if (p != nullptr) {
    return requested_obj_from_buffer_offset(p->buffer_offset());
  } else {
    return nullptr;
  }
}

oop ArchiveHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
  oop* p = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    return *p;
  } else {
    return nullptr;
  }
}

address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
  return _requested_bottom + buffered_address_to_offset(buffered_addr);
}

oop ArchiveHeapWriter::heap_roots_requested_address() {
  return cast_to_oop(_requested_bottom + _heap_roots_offset);
}

address ArchiveHeapWriter::requested_address() {
  assert(_buffer != nullptr, "must be initialized");
  return _requested_bottom;
}

void ArchiveHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
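  // _buffer is a GrowableArray indexed by int, so any byte offset into it must
  // also fit in a jint; hence the max_jint cap below.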
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  Klass* k = Universe::objectArrayKlass(); // already relocated to point to archived klass
  int length = roots->length();
  _heap_roots_word_size = objArrayOopDesc::object_size(length);
  size_t byte_size = _heap_roots_word_size * HeapWordSize;
  if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
    log_error(cds, heap)("roots array is too large. Please reduce the number of classes");
    vm_exit(1);
  }

  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  ensure_buffer_space(new_used);

  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, byte_size);
  {
    // This is copied from MemAllocator::finish
    if (UseCompactObjectHeaders) {
      narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(k);
      oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
    } else {
      oopDesc::set_mark(mem, markWord::prototype());
      oopDesc::release_set_klass(mem, k);
    }
  }
  {
    // This is copied from ObjArrayAllocator::initialize
    arrayOopDesc::set_length(mem, length);
  }

  objArrayOop arrayOop = objArrayOop(cast_to_oop(mem));
  for (int i = 0; i < length; i++) {
    // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside of the real heap!
    oop o = roots->at(i);
    if (UseCompressedOops) {
      * arrayOop->obj_at_addr<narrowOop>(i) = CompressedOops::encode(o);
    } else {
      * arrayOop->obj_at_addr<oop>(i) = o;
    }
  }
  log_info(cds, heap)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);

  _heap_roots_offset = _buffer_used;
  _buffer_used = new_used;
}

static int oop_sorting_rank(oop o) {
  bool has_oop_ptr, has_native_ptr;
  HeapShared::get_pointer_info(o, has_oop_ptr, has_native_ptr);

  if (!has_oop_ptr) {
    if (!has_native_ptr) {
      return 0;
    } else {
      return 1;
    }
  } else {
    if (has_native_ptr) {
      return 2;
    } else {
      return 3;
    }
  }
}

// The goal is to sort the objects in increasing order of:
// - objects that have no pointers
// - objects that have only native pointers
// - objects that have both native and oop pointers
// - objects that have only oop pointers
int ArchiveHeapWriter::compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b) {
  int rank_a = a->_rank;
  int rank_b = b->_rank;

  if (rank_a != rank_b) {
    return rank_a - rank_b;
  } else {
    // If they are the same rank, sort them by their position in the _source_objs array
    return a->_index - b->_index;
  }
}

void ArchiveHeapWriter::sort_source_objs() {
  log_info(cds)("sorting heap objects");
  int len = _source_objs->length();
  _source_objs_order = new GrowableArrayCHeap<HeapObjOrder, mtClassShared>(len);

  for (int i = 0; i < len; i++) {
    oop o = _source_objs->at(i);
    int rank = oop_sorting_rank(o);
    HeapObjOrder os = {i, rank};
    _source_objs_order->append(os);
  }
  log_info(cds)("computed ranks");
  _source_objs_order->sort(compare_objs_by_oop_fields);
  log_info(cds)("sorting heap objects done");
}

void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  sort_source_objs();
  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);

    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, src_obj);
    _buffer_offset_to_source_obj_table->maybe_grow();
  }

  copy_roots_to_buffer(roots);

  log_info(cds)("Size of heap region = " SIZE_FORMAT " bytes, %d objects, %d roots, %d native ptrs",
                _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
}

size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}

int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length --) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  if (UseCompactObjectHeaders) {
    oopDesc::release_set_mark(mem, markWord::prototype().set_narrow_klass(nk));
  } else {
    oopDesc::set_mark(mem, markWord::prototype());
    cast_to_oop(mem)->set_narrow_klass(nk);
  }
  arrayOopDesc::set_length(mem, array_length);
  return mem;
}

void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space of min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
    // we can map the region in any region-based collector.
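    //
    // Worked example, assuming MIN_GC_REGION_ALIGNMENT is 1*M: if _buffer_used is
    // 0xff000 and the incoming object needs 0x2000 bytes, the object would straddle
    // the boundary at 0x100000, so we emit a 0x1000-byte filler array first and the
    // object then starts exactly at the boundary.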
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
           "no buffered object can be larger than %d bytes", MIN_GC_REGION_ALIGNMENT);

    const size_t filler_end = next_min_region_bottom;
    const size_t fill_bytes = filler_end - _buffer_used;
    assert(fill_bytes > 0, "must be");
    ensure_buffer_space(filler_end);

    int array_length = filler_array_length(fill_bytes);
    log_info(cds, heap)("Inserting filler obj array of %d elements (" SIZE_FORMAT " bytes total) @ buffer offset " SIZE_FORMAT,
                        array_length, fill_bytes, _buffer_used);
    HeapWord* filler = init_filler_array_at_buffer_top(array_length, fill_bytes);
    _buffer_used = filler_end;
    _fillers->put((address)filler, fill_bytes);
  }
}

size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_addr);
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  assert(!is_too_large_to_archive(src_obj), "already checked");
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that we don't have an object that can
  // possibly span across two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, byte_size);

  // These native pointers will be restored explicitly at run time.
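  // A java.lang.Module object holds a ModuleEntry* and a java.lang.ClassLoader
  // holds a ClassLoaderData*; both point into dump-time native memory, so the
  // buffered copies are cleared to nullptr here and recomputed at run time.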
  if (java_lang_Module::is_instance(src_obj)) {
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) {
  assert(!info->is_used(), "only set once");
  assert(UseG1GC, "must be");
  address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
  log_info(cds, heap)("Heap end = %p", heap_end);

  size_t heap_region_byte_size = _buffer_used;
  assert(heap_region_byte_size > 0, "must have archived at least one object!");

  if (UseCompressedOops) {
    _requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
  } else {
    // We always write the objects as if the heap started at this address. This
    // makes the contents of the archive heap deterministic.
    //
    // Note that at runtime, the heap address is selected by the OS, so the archive
    // heap will not be mapped at 0x10000000, and the contents need to be patched.
    _requested_bottom = (address)NOCOOPS_REQUESTED_BASE;
  }

  assert(is_aligned(_requested_bottom, G1HeapRegion::GrainBytes), "sanity");

  _requested_top = _requested_bottom + _buffer_used;

  info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
                                    offset_to_buffered_address<HeapWord*>(_buffer_used)));
  info->set_heap_roots_offset(_heap_roots_offset);
}

// Oop relocation

template <typename T> T* ArchiveHeapWriter::requested_addr_to_buffered_addr(T* p) {
  assert(is_in_requested_range(cast_to_oop(p)), "must be");

  address addr = address(p);
  assert(addr >= _requested_bottom, "must be");
  size_t offset = addr - _requested_bottom;
  return offset_to_buffered_address<T*>(offset);
}

template <typename T> oop ArchiveHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
  oop o = load_oop_from_buffer(buffered_addr);
  assert(!in_buffer(cast_from_oop<address>(o)), "must point to source oop");
  return o;
}

template <typename T> void ArchiveHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
                                                                            oop request_oop) {
  assert(is_in_requested_range(request_oop), "must be");
  store_oop_in_buffer(buffered_addr, request_oop);
}

inline void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
  *buffered_addr = requested_obj;
}

inline void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
  narrowOop val = CompressedOops::encode_not_null(requested_obj);
  *buffered_addr = val;
}

oop ArchiveHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
  return *buffered_addr;
}

oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
  return CompressedOops::decode(*buffered_addr);
}

template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap) {
  oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
  if (!CompressedOops::is_null(source_referent)) {
    oop request_referent = source_obj_to_requested_obj(source_referent);
    store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
    mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
  }
}

template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
  address requested_region_bottom;

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p < (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  if (!UseCompactObjectHeaders) {
    fake_oop->set_narrow_klass(nk);
  }

  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap. This also has the side effect of pre-initializing the
  // identity_hash for all shared objects, so they are less likely to be written
  // into during run time, increasing the potential of memory sharing.
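  // (src_obj is nullptr only for the synthetic heap-roots array, which is created
  // directly in the buffer by copy_roots_to_buffer() and has no dump-time identity
  // hash that needs to be preserved.)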
  if (src_obj != nullptr) {
    intptr_t src_hash = src_obj->identity_hash();
    if (UseCompactObjectHeaders) {
      fake_oop->set_mark(markWord::prototype().set_narrow_klass(nk).copy_set_hash(src_hash));
    } else {
      fake_oop->set_mark(markWord::prototype().copy_set_hash(src_hash));
    }
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
}

// Relocate an element in the buffered copy of HeapShared::roots()
template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap) {
  size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
  relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset), oopmap);
}

class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;

public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
    _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap) {}

  void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }

private:
  template <class T> void do_oop_work(T *p) {
    size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
    ArchiveHeapWriter::relocate_field_in_buffer<T>((T*)(_buffered_obj + field_offset), _oopmap);
  }
};

static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
  // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
  size_t start = bitmap->find_first_set_bit(0);
  size_t end = bitmap->size();
  log_info(cds)("%s = " SIZE_FORMAT_W(7) " ... " SIZE_FORMAT_W(7) " (%3zu%% ... %3zu%% = %3zu%%)", which,
                start, end,
                start * 100 / total_bits,
                end   * 100 / total_bits,
                (end - start) * 100 / total_bits);
}

// Update all oop fields embedded in the buffered objects
void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                               ArchiveHeapInfo* heap_info) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);

  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
    assert(info != nullptr, "must be");
    oop requested_obj = requested_obj_from_buffer_offset(info->buffer_offset());
    update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
    address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
    EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
    src_obj->oop_iterate(&relocator);
  }

  // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
  // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
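  // Instead, relocate each root slot individually; the slot layout depends on
  // UseCompressedOops, so the matching narrowOop/oop flavor of relocate_root_at()
  // is chosen below.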
  oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_offset);
  update_header_for_requested_obj(requested_roots, nullptr, Universe::objectArrayKlass());
  int length = roots != nullptr ? roots->length() : 0;
  for (int i = 0; i < length; i++) {
    if (UseCompressedOops) {
      relocate_root_at<narrowOop>(requested_roots, i, heap_info->oopmap());
    } else {
      relocate_root_at<oop>(requested_roots, i, heap_info->oopmap());
    }
  }

  compute_ptrmap(heap_info);

  size_t total_bytes = (size_t)_buffer->length();
  log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
  log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
}

void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
  Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
  if (ptr != nullptr) {
    NativePointerInfo info;
    info._src_obj = src_obj;
    info._field_offset = field_offset;
    _native_pointers->append(info);
    HeapShared::set_has_native_pointers(src_obj);
    _num_native_ptrs ++;
  }
}

// Do we have a jlong/jint field that's actually a pointer to a MetaspaceObj?
bool ArchiveHeapWriter::is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset) {
  HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
  assert(p != nullptr, "must be");

  // requested_field_addr = the address of this field in the requested space
  oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
  Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
  assert((Metadata**)_requested_bottom <= requested_field_addr && requested_field_addr < (Metadata**)_requested_top, "range check");

  BitMap::idx_t idx = requested_field_addr - (Metadata**)_requested_bottom;
  // Leading zeros have been removed so some addresses may not be in the ptrmap
  size_t start_pos = FileMapInfo::current_info()->heap_ptrmap_start_pos();
  if (idx < start_pos) {
    return false;
  } else {
    idx -= start_pos;
  }
  return (idx < heap_info->ptrmap()->size()) && (heap_info->ptrmap()->at(idx) == true);
}

void ArchiveHeapWriter::compute_ptrmap(ArchiveHeapInfo* heap_info) {
  int num_non_null_ptrs = 0;
  Metadata** bottom = (Metadata**)_requested_bottom;
  Metadata** top = (Metadata**)_requested_top; // exclusive
  heap_info->ptrmap()->resize(top - bottom);

  BitMap::idx_t max_idx = 32; // paranoid - don't make it too small
  for (int i = 0; i < _native_pointers->length(); i++) {
    NativePointerInfo info = _native_pointers->at(i);
    oop src_obj = info._src_obj;
    int field_offset = info._field_offset;
    HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
    // requested_field_addr = the address of this field in the requested space
    oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
    Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
    assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");

    // Mark this field in the bitmap
    BitMap::idx_t idx = requested_field_addr - bottom;
    heap_info->ptrmap()->set_bit(idx);
    num_non_null_ptrs ++;
    max_idx = MAX2(max_idx, idx);

    // Set the native pointer to the requested address of the metadata (at runtime,
    // the metadata will have this address if the RO/RW regions are mapped at the
    // default location).
    Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
    Metadata* native_ptr = *buffered_field_addr;
    assert(native_ptr != nullptr, "sanity");

    address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
    address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
    *buffered_field_addr = (Metadata*)requested_native_ptr;
  }

  heap_info->ptrmap()->resize(max_idx + 1);
  log_info(cds, heap)("compute_ptrmap: marked %d non-null native pointers for heap region (" SIZE_FORMAT " bits)",
                      num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}

#endif // INCLUDE_CDS_JAVA_HEAP