/*
 * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/oop.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;

// The following are offsets from buffer_bottom()
size_t ArchiveHeapWriter::_buffer_used;

// Heap root segments
HeapRootSegments ArchiveHeapWriter::_heap_root_segments;

address ArchiveHeapWriter::_requested_bottom;
address ArchiveHeapWriter::_requested_top;

GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
GrowableArrayCHeap<ArchiveHeapWriter::HeapObjOrder, mtClassShared>* ArchiveHeapWriter::_source_objs_order;

ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
  ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;

typedef ResourceHashtable<
      size_t,    // offset of a filler from ArchiveHeapWriter::buffer_bottom()
      size_t,    // size of this filler (in bytes)
      127,       // prime number
      AnyObj::C_HEAP,
      mtClassShared> FillersTable;
static FillersTable* _fillers;
static int _num_native_ptrs = 0;

void ArchiveHeapWriter::init() {
  if (HeapShared::can_write()) {
    Universe::heap()->collect(GCCause::_java_lang_system_gc);

    _buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
    _fillers = new FillersTable();
    _requested_bottom = nullptr;
    _requested_top = nullptr;

    _native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
    _source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);
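
    // MIN_GC_REGION_ALIGNMENT must not exceed the smallest possible G1 region:
    // the buffer is laid out in MIN_GC_REGION_ALIGNMENT-sized chunks, and each
    // chunk must be mappable into a single heap region at runtime.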
    guarantee(MIN_GC_REGION_ALIGNMENT <= G1HeapRegion::min_region_size_in_words() * HeapWordSize, "must be");
  }
}

void ArchiveHeapWriter::add_source_obj(oop src_obj) {
  _source_objs->append(src_obj);
}

void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
                              ArchiveHeapInfo* heap_info) {
  assert(HeapShared::can_write(), "sanity");
  allocate_buffer();
  copy_source_objs_to_buffer(roots);
  set_requested_address(heap_info);
  relocate_embedded_oops(roots, heap_info);
}

bool ArchiveHeapWriter::is_too_large_to_archive(oop o) {
  return is_too_large_to_archive(o->size());
}

bool ArchiveHeapWriter::is_string_too_large_to_archive(oop string) {
  typeArrayOop value = java_lang_String::value_no_keepalive(string);
  return is_too_large_to_archive(value);
}

bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) {
  assert(size > 0, "no zero-size object");
  assert(size * HeapWordSize > size, "no overflow");
  static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");

  size_t byte_size = size * HeapWordSize;
  if (byte_size > size_t(MIN_GC_REGION_ALIGNMENT)) {
    return true;
  } else {
    return false;
  }
}

// Various lookup functions between source_obj, buffered_obj and requested_obj
bool ArchiveHeapWriter::is_in_requested_range(oop o) {
  assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
  address a = cast_from_oop<address>(o);
  return (_requested_bottom <= a && a < _requested_top);
}

oop ArchiveHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
  oop req_obj = cast_to_oop(_requested_bottom + offset);
  assert(is_in_requested_range(req_obj), "must be");
  return req_obj;
}

oop ArchiveHeapWriter::source_obj_to_requested_obj(oop src_obj) {
  assert(CDSConfig::is_dumping_heap(), "dump-time only");
  HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
  if (p != nullptr) {
    return requested_obj_from_buffer_offset(p->buffer_offset());
  } else {
    return nullptr;
  }
}

oop ArchiveHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
  oop* p = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    return *p;
  } else {
    return nullptr;
  }
}

address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
  return _requested_bottom + buffered_address_to_offset(buffered_addr);
}

address ArchiveHeapWriter::requested_address() {
  assert(_buffer != nullptr, "must be initialized");
  return _requested_bottom;
}

void ArchiveHeapWriter::allocate_buffer() {
  int initial_buffer_size = 100000;
  _buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
  _buffer_used = 0;
  ensure_buffer_space(1); // so that buffer_bottom() works
}

void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
  // We usually have very small heaps. If we get a huge one it's probably caused by a bug.
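  // Also, _buffer is a GrowableArrayCHeap indexed by int, so offsets beyond
  // max_jint could not be addressed in the buffer anyway.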
  guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
  _buffer->at_grow(to_array_index(min_bytes));
}

objArrayOop ArchiveHeapWriter::allocate_root_segment(size_t offset, int element_count) {
  HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
  memset(mem, 0, objArrayOopDesc::object_size(element_count) * HeapWordSize); // object_size() is in HeapWords; memset takes bytes

  // The initialization code is copied from MemAllocator::finish and ObjArrayAllocator::initialize.
  oopDesc::set_mark(mem, markWord::prototype());
  oopDesc::release_set_klass(mem, Universe::objectArrayKlass());
  arrayOopDesc::set_length(mem, element_count);
  return objArrayOop(cast_to_oop(mem));
}

void ArchiveHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
  // Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
  if (UseCompressedOops) {
    *segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
  } else {
    *segment->obj_at_addr<oop>(index) = root;
  }
}

void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // Depending on the number of classes we are archiving, a single roots array may be
  // larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
  // allows us to chop the large array into a series of "segments". The current layout
  // starts with zero or more segments that exactly fit MIN_GC_REGION_ALIGNMENT, and ends
  // with a single segment that may be smaller than MIN_GC_REGION_ALIGNMENT.
  // This is simple and efficient. We do not need filler objects anywhere between the segments,
  // or immediately after the last segment. This allows starting the object dump immediately
  // after the roots.
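  //
  // For example (illustrative numbers, not from the original comment), with
  // MIN_GC_REGION_ALIGNMENT = 1M and roots needing about 2.5M in total:
  //
  //   | segment 0: 1M | segment 1: 1M | segment 2: 0.5M | archived objects ...
  //   ^ buffer_bottom()                                  ^ object dump starts here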

  assert((_buffer_used % MIN_GC_REGION_ALIGNMENT) == 0,
         "Pre-condition: Roots start at aligned boundary: " SIZE_FORMAT, _buffer_used);

  int max_elem_count = ((MIN_GC_REGION_ALIGNMENT - arrayOopDesc::header_size_in_bytes()) / heapOopSize);
  assert(objArrayOopDesc::object_size(max_elem_count) * HeapWordSize == MIN_GC_REGION_ALIGNMENT,
         "Should match exactly");

  HeapRootSegments segments(_buffer_used,
                            roots->length(),
                            MIN_GC_REGION_ALIGNMENT,
                            max_elem_count);

  int root_index = 0;
  for (size_t seg_idx = 0; seg_idx < segments.count(); seg_idx++) {
    int size_elems = segments.size_in_elems(seg_idx);
    size_t size_bytes = segments.size_in_bytes(seg_idx);

    size_t oop_offset = _buffer_used;
    _buffer_used = oop_offset + size_bytes;
    ensure_buffer_space(_buffer_used);

    assert((oop_offset % MIN_GC_REGION_ALIGNMENT) == 0,
           "Roots segment " SIZE_FORMAT " start is not aligned: " SIZE_FORMAT,
           segments.count(), oop_offset);

    objArrayOop seg_oop = allocate_root_segment(oop_offset, size_elems);
    for (int i = 0; i < size_elems; i++) {
      root_segment_at_put(seg_oop, i, roots->at(root_index++));
    }

    log_info(cds, heap)("archived obj root segment [%d] = " SIZE_FORMAT " bytes, obj = " PTR_FORMAT,
                        size_elems, size_bytes, p2i(seg_oop));
  }

  assert(root_index == roots->length(), "Post-condition: All roots are handled");

  _heap_root_segments = segments;
}

// The goal is to sort the objects in increasing order of:
// - objects that have only oop pointers
// - objects that have both native and oop pointers
// - objects that have only native pointers
// - objects that have no pointers
static int oop_sorting_rank(oop o) {
  bool has_oop_ptr, has_native_ptr;
  HeapShared::get_pointer_info(o, has_oop_ptr, has_native_ptr);

  if (has_oop_ptr) {
    if (!has_native_ptr) {
      return 0;
    } else {
      return 1;
    }
  } else {
    if (has_native_ptr) {
      return 2;
    } else {
      return 3;
    }
  }
}

int ArchiveHeapWriter::compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b) {
  int rank_a = a->_rank;
  int rank_b = b->_rank;

  if (rank_a != rank_b) {
    return rank_a - rank_b;
  } else {
    // If they are the same rank, sort them by their position in the _source_objs array
    return a->_index - b->_index;
  }
}

void ArchiveHeapWriter::sort_source_objs() {
  log_info(cds)("sorting heap objects");
  int len = _source_objs->length();
  _source_objs_order = new GrowableArrayCHeap<HeapObjOrder, mtClassShared>(len);

  for (int i = 0; i < len; i++) {
    oop o = _source_objs->at(i);
    int rank = oop_sorting_rank(o);
    HeapObjOrder os = {i, rank};
    _source_objs_order->append(os);
  }
  log_info(cds)("computed ranks");
  _source_objs_order->sort(compare_objs_by_oop_fields);
  log_info(cds)("sorting heap objects done");
}

void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
  // There could be multiple root segments, which we want to be region-aligned.
  // Putting them ahead of objects makes sure we waste no space.
  copy_roots_to_buffer(roots);

  sort_source_objs();
  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
    assert(info != nullptr, "must be");
    size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
    info->set_buffer_offset(buffer_offset);

    _buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, src_obj);
    _buffer_offset_to_source_obj_table->maybe_grow();
  }

  log_info(cds)("Size of heap region = " SIZE_FORMAT " bytes, %d objects, %d roots, %d native ptrs",
                _buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
}

size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
  size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
  return byte_size;
}

int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
  assert(is_object_aligned(fill_bytes), "must be");
  size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));

  int initial_length = to_array_length(fill_bytes / elemSize);
  for (int length = initial_length; length >= 0; length--) {
    size_t array_byte_size = filler_array_byte_size(length);
    if (array_byte_size == fill_bytes) {
      return length;
    }
  }

  ShouldNotReachHere();
  return -1;
}

HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
  HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
  memset(mem, 0, fill_bytes);
  oopDesc::set_mark(mem, markWord::prototype());
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
  cast_to_oop(mem)->set_narrow_klass(nk);
  arrayOopDesc::set_length(mem, array_length);
  return mem;
}

void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
  // We fill only with arrays (so we don't need to use a single HeapWord filler if the
  // leftover space is smaller than a zero-sized array object). Therefore, we need to
  // make sure there's enough space of min_filler_byte_size in the current region after
  // required_byte_size has been allocated. If not, fill the remainder of the current
  // region.
  size_t min_filler_byte_size = filler_array_byte_size(0);
  size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;

  const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);

  if (cur_min_region_bottom != next_min_region_bottom) {
    // Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
    // we can map the region in any region-based collector.
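    //
    // Illustrative example (added, with made-up numbers): if
    // MIN_GC_REGION_ALIGNMENT were 4096, _buffer_used == 4000, and
    // required_byte_size + min_filler_byte_size == 200, then new_used == 4200
    // crosses the boundary at 4096, so the 96 bytes at offsets [4000, 4096)
    // are filled with a dummy object array and the next object starts exactly
    // at the boundary.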
    assert(next_min_region_bottom > cur_min_region_bottom, "must be");
    assert(next_min_region_bottom - cur_min_region_bottom == MIN_GC_REGION_ALIGNMENT,
           "no buffered object can be larger than %d bytes", MIN_GC_REGION_ALIGNMENT);

    const size_t filler_end = next_min_region_bottom;
    const size_t fill_bytes = filler_end - _buffer_used;
    assert(fill_bytes > 0, "must be");
    ensure_buffer_space(filler_end);

    int array_length = filler_array_length(fill_bytes);
    log_info(cds, heap)("Inserting filler obj array of %d elements (" SIZE_FORMAT " bytes total) @ buffer offset " SIZE_FORMAT,
                        array_length, fill_bytes, _buffer_used);
    HeapWord* filler = init_filler_array_at_buffer_top(array_length, fill_bytes);
    _buffer_used = filler_end;
    _fillers->put(buffered_address_to_offset((address)filler), fill_bytes);
  }
}

size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
  size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
  if (p != nullptr) {
    assert(*p > 0, "filler must be larger than zero bytes");
    return *p;
  } else {
    return 0; // buffered_addr is not a filler
  }
}

template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
  T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
  *field_addr = value;
}

size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
  assert(!is_too_large_to_archive(src_obj), "already checked");
  size_t byte_size = src_obj->size() * HeapWordSize;
  assert(byte_size > 0, "no zero-size objects");

  // For region-based collectors such as G1, the archive heap may be mapped into
  // multiple regions. We need to make sure that we don't have an object that can
  // possibly span across two regions.
  maybe_fill_gc_region_gap(byte_size);

  size_t new_used = _buffer_used + byte_size;
  assert(new_used > _buffer_used, "no wrap around");

  size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
  size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
  assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");

  ensure_buffer_space(new_used);

  address from = cast_from_oop<address>(src_obj);
  address to = offset_to_buffered_address<address>(_buffer_used);
  assert(is_object_aligned(_buffer_used), "sanity");
  assert(is_object_aligned(byte_size), "sanity");
  memcpy(to, from, byte_size);

  // These native pointers will be restored explicitly at run time.
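  // A Module object caches a ModuleEntry*, and a ClassLoader object caches a
  // ClassLoaderData*; both point into the dump-time JVM and would be dangling
  // in the next JVM instance, so they are zeroed in the buffered copy.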
  if (java_lang_Module::is_instance(src_obj)) {
    update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
  } else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
    // We only archive these loaders
    if (src_obj != SystemDictionary::java_platform_loader() &&
        src_obj != SystemDictionary::java_system_loader()) {
      assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
    }
#endif
    update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
  }

  size_t buffered_obj_offset = _buffer_used;
  _buffer_used = new_used;

  return buffered_obj_offset;
}

void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) {
  assert(!info->is_used(), "only set once");

  size_t heap_region_byte_size = _buffer_used;
  assert(heap_region_byte_size > 0, "must archive at least one object!");

  if (UseCompressedOops) {
    if (UseG1GC) {
      address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
      log_info(cds, heap)("Heap end = %p", heap_end);
      _requested_bottom = align_down(heap_end - heap_region_byte_size, G1HeapRegion::GrainBytes);
      _requested_bottom = align_down(_requested_bottom, MIN_GC_REGION_ALIGNMENT);
      assert(is_aligned(_requested_bottom, G1HeapRegion::GrainBytes), "sanity");
    } else {
      _requested_bottom = align_up(CompressedOops::begin(), MIN_GC_REGION_ALIGNMENT);
    }
  } else {
    // We always write the objects as if the heap started at this address. This
    // makes the contents of the archive heap deterministic.
    //
    // Note that at runtime, the heap address is selected by the OS, so the archive
    // heap will not be mapped at 0x10000000, and the contents need to be patched.
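    //
    // (NOCOOPS_REQUESTED_BASE is the fixed 0x10000000 base alluded to above;
    // the runtime relocation patches every oop that was written against it.)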
    _requested_bottom = align_up((address)NOCOOPS_REQUESTED_BASE, MIN_GC_REGION_ALIGNMENT);
  }

  assert(is_aligned(_requested_bottom, MIN_GC_REGION_ALIGNMENT), "sanity");

  _requested_top = _requested_bottom + _buffer_used;

  info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
                                    offset_to_buffered_address<HeapWord*>(_buffer_used)));
  info->set_heap_root_segments(_heap_root_segments);
}

// Oop relocation

template <typename T> T* ArchiveHeapWriter::requested_addr_to_buffered_addr(T* p) {
  assert(is_in_requested_range(cast_to_oop(p)), "must be");

  address addr = address(p);
  assert(addr >= _requested_bottom, "must be");
  size_t offset = addr - _requested_bottom;
  return offset_to_buffered_address<T*>(offset);
}

template <typename T> oop ArchiveHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
  oop o = load_oop_from_buffer(buffered_addr);
  assert(!in_buffer(cast_from_oop<address>(o)), "must point to source oop");
  return o;
}

template <typename T> void ArchiveHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
                                                                            oop request_oop) {
  assert(is_in_requested_range(request_oop), "must be");
  store_oop_in_buffer(buffered_addr, request_oop);
}

inline void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
  *buffered_addr = requested_obj;
}

inline void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
  narrowOop val = CompressedOops::encode_not_null(requested_obj);
  *buffered_addr = val;
}

oop ArchiveHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
  return *buffered_addr;
}

oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
  return CompressedOops::decode(*buffered_addr);
}

template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap) {
  oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
  if (!CompressedOops::is_null(source_referent)) {
    oop request_referent = source_obj_to_requested_obj(source_referent);
    store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
    mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
  }
}

template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
  T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
  address requested_region_bottom;

  assert(request_p >= (T*)_requested_bottom, "sanity");
  assert(request_p < (T*)_requested_top, "sanity");
  requested_region_bottom = _requested_bottom;

  // Mark the pointer in the oopmap
  T* region_bottom = (T*)requested_region_bottom;
  assert(request_p >= region_bottom, "must be");
  BitMap::idx_t idx = request_p - region_bottom;
  assert(idx < oopmap->size(), "overflow");
  oopmap->set_bit(idx);
}

void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
  assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
  narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
  address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));

  oop fake_oop = cast_to_oop(buffered_addr);
  fake_oop->set_narrow_klass(nk);

  if (src_obj == nullptr) {
    return;
  }
  // We need to retain the identity_hash, because it may have been used by some hashtables
  // in the shared heap.
  if (!src_obj->fast_no_hash_check() && (!(EnableValhalla && src_obj->mark().is_inline_type()))) {
    intptr_t src_hash = src_obj->identity_hash();
    fake_oop->set_mark(src_klass->prototype_header().copy_set_hash(src_hash));
    assert(fake_oop->mark().is_unlocked(), "sanity");

    DEBUG_ONLY(intptr_t archived_hash = fake_oop->identity_hash());
    assert(src_hash == archived_hash, "Different hash codes: original " INTPTR_FORMAT ", archived " INTPTR_FORMAT, src_hash, archived_hash);
  }
  // Strip age bits.
  fake_oop->set_mark(fake_oop->mark().set_age(0));
}

class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
  oop _src_obj;
  address _buffered_obj;
  CHeapBitMap* _oopmap;

public:
  EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
    _src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap) {}

  void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
  void do_oop(      oop *p) { EmbeddedOopRelocator::do_oop_work(p); }

private:
  template <class T> void do_oop_work(T *p) {
    size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
    ArchiveHeapWriter::relocate_field_in_buffer<T>((T*)(_buffered_obj + field_offset), _oopmap);
  }
};

static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
  // The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
  size_t start = bitmap->find_first_set_bit(0);
  size_t end = bitmap->size();
  log_info(cds)("%s = " SIZE_FORMAT_W(7) " ... " SIZE_FORMAT_W(7) " (%3zu%% ... %3zu%% = %3zu%%)", which,
                start, end,
                start * 100 / total_bits,
                end * 100 / total_bits,
                (end - start) * 100 / total_bits);
}

// Update all oop fields embedded in the buffered objects
void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
                                               ArchiveHeapInfo* heap_info) {
  size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
  size_t heap_region_byte_size = _buffer_used;
  heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);

  for (int i = 0; i < _source_objs_order->length(); i++) {
    int src_obj_index = _source_objs_order->at(i)._index;
    oop src_obj = _source_objs->at(src_obj_index);
    HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
    assert(info != nullptr, "must be");
    oop requested_obj = requested_obj_from_buffer_offset(info->buffer_offset());
    update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
    address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
    EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
    src_obj->oop_iterate(&relocator);
  }

  // Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
  // doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
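  // The segment elements were written with raw stores (root_segment_at_put()),
  // so walk them manually here rather than via oop_iterate().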
  for (size_t seg_idx = 0; seg_idx < _heap_root_segments.count(); seg_idx++) {
    size_t seg_offset = _heap_root_segments.segment_offset(seg_idx);

    objArrayOop requested_obj = (objArrayOop)requested_obj_from_buffer_offset(seg_offset);
    update_header_for_requested_obj(requested_obj, nullptr, Universe::objectArrayKlass());
    address buffered_obj = offset_to_buffered_address<address>(seg_offset);
    int length = _heap_root_segments.size_in_elems(seg_idx);

    if (UseCompressedOops) {
      for (int i = 0; i < length; i++) {
        narrowOop* addr = (narrowOop*)(buffered_obj + objArrayOopDesc::obj_at_offset<narrowOop>(i));
        relocate_field_in_buffer<narrowOop>(addr, heap_info->oopmap());
      }
    } else {
      for (int i = 0; i < length; i++) {
        oop* addr = (oop*)(buffered_obj + objArrayOopDesc::obj_at_offset<oop>(i));
        relocate_field_in_buffer<oop>(addr, heap_info->oopmap());
      }
    }
  }

  compute_ptrmap(heap_info);

  size_t total_bytes = (size_t)_buffer->length();
  log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop)));
  log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
}

void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
  Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
  if (ptr != nullptr) {
    NativePointerInfo info;
    info._src_obj = src_obj;
    info._field_offset = field_offset;
    _native_pointers->append(info);
    HeapShared::set_has_native_pointers(src_obj);
    _num_native_ptrs++;
  }
}

// Do we have a jlong/jint field that's actually a pointer to a MetaspaceObj?
bool ArchiveHeapWriter::is_marked_as_native_pointer(ArchiveHeapInfo* heap_info, oop src_obj, int field_offset) {
  HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
  assert(p != nullptr, "must be");

  // requested_field_addr = the address of this field in the requested space
  oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
  Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
  assert((Metadata**)_requested_bottom <= requested_field_addr && requested_field_addr < (Metadata**)_requested_top, "range check");

  BitMap::idx_t idx = requested_field_addr - (Metadata**)_requested_bottom;
  // Leading zeros have been removed so some addresses may not be in the ptrmap
  size_t start_pos = FileMapInfo::current_info()->heap_ptrmap_start_pos();
  if (idx < start_pos) {
    return false;
  } else {
    idx -= start_pos;
  }
  return (idx < heap_info->ptrmap()->size()) && (heap_info->ptrmap()->at(idx) == true);
}

void ArchiveHeapWriter::compute_ptrmap(ArchiveHeapInfo* heap_info) {
  int num_non_null_ptrs = 0;
  Metadata** bottom = (Metadata**)_requested_bottom;
  Metadata** top = (Metadata**)_requested_top; // exclusive
  heap_info->ptrmap()->resize(top - bottom);

  BitMap::idx_t max_idx = 32; // paranoid - don't make it too small
  for (int i = 0; i < _native_pointers->length(); i++) {
    NativePointerInfo info = _native_pointers->at(i);
    oop src_obj = info._src_obj;
    int field_offset = info._field_offset;
    HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
    // requested_field_addr = the address of this field in the requested space
    oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
    Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
    assert(bottom <= requested_field_addr && requested_field_addr < top, "range check");

    // Mark this field in the bitmap
    BitMap::idx_t idx = requested_field_addr - bottom;
    heap_info->ptrmap()->set_bit(idx);
    num_non_null_ptrs++;
    max_idx = MAX2(max_idx, idx);

    // Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
    // this address if the RO/RW regions are mapped at the default location).

    Metadata** buffered_field_addr = requested_addr_to_buffered_addr(requested_field_addr);
    Metadata* native_ptr = *buffered_field_addr;
    assert(native_ptr != nullptr, "sanity");

    address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
    address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
    *buffered_field_addr = (Metadata*)requested_native_ptr;
  }

  heap_info->ptrmap()->resize(max_idx + 1);
  log_info(cds, heap)("compute_ptrmap: marked %d non-null native pointers for heap region (" SIZE_FORMAT " bits)",
                      num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}

#endif // INCLUDE_CDS_JAVA_HEAP